From c7a990eb212e959144faee3b167fd0730a119a4c Mon Sep 17 00:00:00 2001
From: Ryota
Date: Sat, 11 Oct 2025 23:55:14 +0100
Subject: [PATCH 01/28] Add etcd env variable handling

---
 .../controller/etcd/etcd_env.go               | 120 ++++++++
 .../controller/etcd/etcd_env_test.go          | 286 ++++++++++++++++++
 2 files changed, 406 insertions(+)
 create mode 100644 pkg/resource-handler/controller/etcd/etcd_env.go
 create mode 100644 pkg/resource-handler/controller/etcd/etcd_env_test.go

diff --git a/pkg/resource-handler/controller/etcd/etcd_env.go b/pkg/resource-handler/controller/etcd/etcd_env.go
new file mode 100644
index 00000000..05b1a8c7
--- /dev/null
+++ b/pkg/resource-handler/controller/etcd/etcd_env.go
@@ -0,0 +1,120 @@
+package etcd
+
+import (
+	"fmt"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+// BuildEtcdEnv constructs all environment variables for etcd clustering in
+// StatefulSets. This combines pod identity, etcd config, and cluster peer
+// discovery details.
+func BuildEtcdEnv(etcdName, namespace string, replicas int32, serviceName string) []corev1.EnvVar {
+	envVars := make([]corev1.EnvVar, 0)
+
+	// Add pod identity variables from downward API
+	envVars = append(envVars, buildPodIdentityEnv()...)
+
+	// Add etcd configuration variables
+	envVars = append(envVars, buildEtcdConfigEnv(etcdName, serviceName, namespace)...)
+
+	// Add the initial cluster peer list
+	clusterPeerList := buildEtcdClusterPeerList(etcdName, serviceName, namespace, replicas)
+	envVars = append(envVars, corev1.EnvVar{
+		Name:  "ETCD_INITIAL_CLUSTER",
+		Value: clusterPeerList,
+	})
+
+	return envVars
+}
+
+// buildPodIdentityEnv creates environment variables for the pod name and
+// namespace. etcd requires both to construct its advertise URLs in
+// StatefulSets; injecting them via the downward API is a common pattern.
+//
+// Ref: https://etcd.io/docs/latest/op-guide/clustering/
+func buildPodIdentityEnv() []corev1.EnvVar {
+	return []corev1.EnvVar{
+		{
+			Name: "POD_NAME",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					FieldPath: "metadata.name",
+				},
+			},
+		},
+		{
+			Name: "POD_NAMESPACE",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					FieldPath: "metadata.namespace",
+				},
+			},
+		},
+	}
+}
+
+// buildEtcdConfigEnv creates etcd configuration environment variables.
+// These configure etcd's network endpoints and cluster formation.
+//
+// Ref: https://etcd.io/docs/latest/op-guide/configuration/
+func buildEtcdConfigEnv(etcdName, serviceName, namespace string) []corev1.EnvVar {
+	return []corev1.EnvVar{
+		{
+			Name:  "ETCD_NAME",
+			Value: "$(POD_NAME)",
+		},
+		{
+			Name:  "ETCD_DATA_DIR",
+			Value: "/var/lib/etcd",
+		},
+		{
+			Name:  "ETCD_LISTEN_CLIENT_URLS",
+			Value: "http://0.0.0.0:2379",
+		},
+		{
+			Name:  "ETCD_LISTEN_PEER_URLS",
+			Value: "http://0.0.0.0:2380",
+		},
+		{
+			Name:  "ETCD_ADVERTISE_CLIENT_URLS",
+			Value: fmt.Sprintf("http://$(POD_NAME).%s.$(POD_NAMESPACE).svc.cluster.local:2379", serviceName),
+		},
+		{
+			Name:  "ETCD_INITIAL_ADVERTISE_PEER_URLS",
+			Value: fmt.Sprintf("http://$(POD_NAME).%s.$(POD_NAMESPACE).svc.cluster.local:2380", serviceName),
+		},
+		{
+			Name:  "ETCD_INITIAL_CLUSTER_STATE",
+			Value: "new",
+		},
+		{
+			Name:  "ETCD_INITIAL_CLUSTER_TOKEN",
+			Value: etcdName,
+		},
+	}
+}
+
+// buildEtcdClusterPeerList generates the initial cluster member list for
+// bootstrap. This tells each etcd member about all other members during cluster
+// formation.
+//
+// Format: member-0=http://member-0.service.ns.svc.cluster.local:2380,...
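+//
+// For example (matching the cases in etcd_env_test.go), etcdName "my-etcd",
+// serviceName "my-etcd-headless", namespace "default" and two replicas yield:
+//
+//	my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-1=http://my-etcd-1.my-etcd-headless.default.svc.cluster.local:2380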
+// +// Ref: https://etcd.io/docs/latest/op-guide/clustering/#static +func buildEtcdClusterPeerList(etcdName, serviceName, namespace string, replicas int32) string { + if replicas < 0 { + return "" + } + + peers := make([]string, 0, replicas) + for i := range replicas { + podName := fmt.Sprintf("%s-%d", etcdName, i) + peerURL := fmt.Sprintf("%s=http://%s.%s.%s.svc.cluster.local:2380", + podName, podName, serviceName, namespace) + peers = append(peers, peerURL) + } + + return strings.Join(peers, ",") +} diff --git a/pkg/resource-handler/controller/etcd/etcd_env_test.go b/pkg/resource-handler/controller/etcd/etcd_env_test.go new file mode 100644 index 00000000..ce40bd82 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_env_test.go @@ -0,0 +1,286 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" +) + +func TestBuildPodIdentityEnv(t *testing.T) { + got := buildPodIdentityEnv() + + want := []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("buildPodIdentityEnv() mismatch (-want +got):\n%s", diff) + } +} + +func TestBuildEtcdConfigEnv(t *testing.T) { + tests := map[string]struct { + etcdName string + serviceName string + namespace string + want []corev1.EnvVar + }{ + "basic configuration": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + want: []corev1.EnvVar{ + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-etcd"}, + }, + }, + "different namespace": { + etcdName: "test-etcd", + serviceName: "test-etcd-headless", + namespace: "production", + want: []corev1.EnvVar{ + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-etcd"}, + }, + }, + "long names": { + etcdName: "very-long-etcd-cluster-name", + serviceName: "very-long-etcd-cluster-name-headless", + namespace: "kube-system", + want: []corev1.EnvVar{ + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: 
"http://$(POD_NAME).very-long-etcd-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).very-long-etcd-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "very-long-etcd-cluster-name"}, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildEtcdConfigEnv(tc.etcdName, tc.serviceName, tc.namespace) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildEtcdConfigEnv() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildEtcdClusterPeerList(t *testing.T) { + tests := map[string]struct { + etcdName string + serviceName string + namespace string + replicas int32 + want string + }{ + "single replica": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: 1, + want: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380", + }, + "three replicas (typical HA)": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: 3, + want: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-1=http://my-etcd-1.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-2=http://my-etcd-2.my-etcd-headless.default.svc.cluster.local:2380", + }, + "five replicas": { + etcdName: "etcd-prod", + serviceName: "etcd-prod-headless", + namespace: "production", + replicas: 5, + want: "etcd-prod-0=http://etcd-prod-0.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-1=http://etcd-prod-1.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-2=http://etcd-prod-2.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-3=http://etcd-prod-3.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-4=http://etcd-prod-4.etcd-prod-headless.production.svc.cluster.local:2380", + }, + "zero replicas": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: 0, + want: "", + }, + "negative replicas": { + etcdName: "my-etcd", + serviceName: "my-etcd-headless", + namespace: "default", + replicas: -1, + want: "", + }, + "different namespace": { + etcdName: "kube-etcd", + serviceName: "kube-etcd-headless", + namespace: "kube-system", + replicas: 3, + want: "kube-etcd-0=http://kube-etcd-0.kube-etcd-headless.kube-system.svc.cluster.local:2380,kube-etcd-1=http://kube-etcd-1.kube-etcd-headless.kube-system.svc.cluster.local:2380,kube-etcd-2=http://kube-etcd-2.kube-etcd-headless.kube-system.svc.cluster.local:2380", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildEtcdClusterPeerList(tc.etcdName, tc.serviceName, tc.namespace, tc.replicas) + if got != tc.want { + t.Errorf("buildEtcdClusterPeerList() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestBuildEtcdEnv(t *testing.T) { + tests := map[string]struct { + etcdName string + namespace string + replicas int32 + serviceName string + want []corev1.EnvVar + }{ + "complete environment with 3 replicas": { + etcdName: "my-etcd", + namespace: "default", + replicas: 3, + serviceName: "my-etcd-headless", + want: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: 
"metadata.namespace", + }, + }, + }, + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER", Value: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-1=http://my-etcd-1.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-2=http://my-etcd-2.my-etcd-headless.default.svc.cluster.local:2380"}, + }, + }, + "single replica": { + etcdName: "test-etcd", + namespace: "test", + replicas: 1, + serviceName: "test-etcd-headless", + want: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + // Cluster setup won't happen in a single cluster, and these + // env variables are only used at startup. + {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER", Value: "test-etcd-0=http://test-etcd-0.test-etcd-headless.test.svc.cluster.local:2380"}, + }, + }, + "zero replicas - no ETCD_INITIAL_CLUSTER": { + etcdName: "empty-etcd", + namespace: "default", + replicas: 0, + serviceName: "empty-etcd-headless", + want: []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, + {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, + {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, + {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).empty-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).empty-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + // Cluster setup won't happen in a single cluster, and these + // env variables are only used at startup. In case of scaling up + // from zero replica, the updated env variable will be picked up + // correctly, and thus an empty variable like this will be OK. 
+ {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "empty-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER"}, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := BuildEtcdEnv(tc.etcdName, tc.namespace, tc.replicas, tc.serviceName) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildEtcdEnv() mismatch (-want +got):\n%s", diff) + } + }) + } +} From a7abcfdf1e8c7af011f5e77d3d1edc3517ffa9f2 Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 01:47:52 +0100 Subject: [PATCH 02/28] Add necessary module dependencies --- pkg/resource-handler/go.mod | 41 +++++++++++++- pkg/resource-handler/go.sum | 105 ++++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 2 deletions(-) diff --git a/pkg/resource-handler/go.mod b/pkg/resource-handler/go.mod index 012c27ad..34835b4f 100644 --- a/pkg/resource-handler/go.mod +++ b/pkg/resource-handler/go.mod @@ -4,26 +4,63 @@ go 1.25.0 require ( github.com/google/go-cmp v0.7.0 + github.com/numtide/multigres-operator/api v0.0.0-20251010210109-0981dc938d36 k8s.io/api v0.34.1 + k8s.io/apimachinery v0.34.1 + sigs.k8s.io/controller-runtime v0.22.3 ) require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/apimachinery v0.34.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/client-go v0.34.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi 
v0.0.0-20250710124328-f3f2b991d03b // indirect k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/pkg/resource-handler/go.sum b/pkg/resource-handler/go.sum index df4088f6..4ee119d3 100644 --- a/pkg/resource-handler/go.sum +++ b/pkg/resource-handler/go.sum @@ -1,46 +1,124 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/btree v1.1.3 
h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/numtide/multigres-operator/api v0.0.0-20251010210109-0981dc938d36 h1:OJyNexfsX8SjGCG2JJBzNT8zThATaiu/MClD9TvCCdw= +github.com/numtide/multigres-operator/api v0.0.0-20251010210109-0981dc938d36/go.mod h1:zvjoyJ/6V8IOFLCcsJ1PwixfYPfpgtHUrPd9+N0/nI8= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak 
v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -52,39 +130,66 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= +sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 
h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= From 24228f4481acd952d2f9b4fa583c412e27bc6cd7 Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 01:52:01 +0100 Subject: [PATCH 03/28] Correct function scope --- pkg/resource-handler/controller/etcd/etcd_env.go | 4 ++-- pkg/resource-handler/controller/etcd/etcd_env_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/resource-handler/controller/etcd/etcd_env.go b/pkg/resource-handler/controller/etcd/etcd_env.go index 05b1a8c7..c8ce2db3 100644 --- a/pkg/resource-handler/controller/etcd/etcd_env.go +++ b/pkg/resource-handler/controller/etcd/etcd_env.go @@ -7,10 +7,10 @@ import ( corev1 "k8s.io/api/core/v1" ) -// BuildEtcdEnv constructs all environment variables for etcd clustering in +// buildEtcdEnv constructs all environment variables for etcd clustering in // StatefulSets. This combines pod identity, etcd config, and cluster peer // discovery details. -func BuildEtcdEnv(etcdName, namespace string, replicas int32, serviceName string) []corev1.EnvVar { +func buildEtcdEnv(etcdName, namespace string, replicas int32, serviceName string) []corev1.EnvVar { envVars := make([]corev1.EnvVar, 0) // Add pod identity variables from downward API diff --git a/pkg/resource-handler/controller/etcd/etcd_env_test.go b/pkg/resource-handler/controller/etcd/etcd_env_test.go index ce40bd82..38ce47ac 100644 --- a/pkg/resource-handler/controller/etcd/etcd_env_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_env_test.go @@ -277,7 +277,7 @@ func TestBuildEtcdEnv(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := BuildEtcdEnv(tc.etcdName, tc.namespace, tc.replicas, tc.serviceName) + got := buildEtcdEnv(tc.etcdName, tc.namespace, tc.replicas, tc.serviceName) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("BuildEtcdEnv() mismatch (-want +got):\n%s", diff) } From 442c7c24a7c00575189b7b6c3f475d132e7e8458 Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 01:52:14 +0100 Subject: [PATCH 04/28] Add port setup for etcd --- pkg/resource-handler/controller/etcd/ports.go | 62 ++++++++++ .../controller/etcd/ports_test.go | 106 ++++++++++++++++++ 2 files changed, 168 insertions(+) create mode 100644 pkg/resource-handler/controller/etcd/ports.go create mode 100644 pkg/resource-handler/controller/etcd/ports_test.go diff --git a/pkg/resource-handler/controller/etcd/ports.go b/pkg/resource-handler/controller/etcd/ports.go new file mode 100644 index 00000000..6c36d03b --- /dev/null +++ b/pkg/resource-handler/controller/etcd/ports.go @@ -0,0 +1,62 @@ +package etcd + +import ( + corev1 "k8s.io/api/core/v1" +) + +const ( + // ClientPort is the default port for etcd client connections. + ClientPort = 2379 + + // PeerPort is the default port for etcd peer connections. + PeerPort = 2380 +) + +// PortOption configures port settings for etcd containers. +type PortOption func(*portOptions) + +type portOptions struct { + clientPort int32 + peerPort int32 +} + +// WithClientPort overrides the default client port. +func WithClientPort(port int32) PortOption { + return func(o *portOptions) { + o.clientPort = port + } +} + +// WithPeerPort overrides the default peer port. +func WithPeerPort(port int32) PortOption { + return func(o *portOptions) { + o.peerPort = port + } +} + +// buildContainerPorts creates the port definitions for the etcd container. +// These ports are used in both the StatefulSet container spec and Service +// definitions. 
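+//
+// The defaults can be overridden with the functional options, as the tests
+// below exercise; for example:
+//
+//	ports := buildContainerPorts(WithClientPort(3379), WithPeerPort(3380))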
+func buildContainerPorts(opts ...PortOption) []corev1.ContainerPort { + options := &portOptions{ + clientPort: ClientPort, + peerPort: PeerPort, + } + + for _, opt := range opts { + opt(options) + } + + return []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: options.clientPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: options.peerPort, + Protocol: corev1.ProtocolTCP, + }, + } +} diff --git a/pkg/resource-handler/controller/etcd/ports_test.go b/pkg/resource-handler/controller/etcd/ports_test.go new file mode 100644 index 00000000..1fa96123 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/ports_test.go @@ -0,0 +1,106 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" +) + +func TestBuildContainerPorts(t *testing.T) { + tests := map[string]struct { + opts []PortOption + want []corev1.ContainerPort + }{ + "default ports": { + opts: nil, + want: []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: 2379, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: 2380, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + "custom client port": { + opts: []PortOption{WithClientPort(3379)}, + want: []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: 3379, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: 2380, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + "custom peer port": { + opts: []PortOption{WithPeerPort(3380)}, + want: []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: 2379, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: 3380, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + "both ports customized": { + opts: []PortOption{ + WithClientPort(9379), + WithPeerPort(9380), + }, + want: []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: 9379, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: 9380, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + "zero port values - should use zero": { + opts: []PortOption{ + WithClientPort(0), + WithPeerPort(0), + }, + want: []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: 0, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + ContainerPort: 0, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildContainerPorts(tc.opts...) 
+ if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildContainerPorts() mismatch (-want +got):\n%s", diff) + } + }) + } +} From 18ab0b5a09c3b7292b3b5c4983f363bffb4ac61a Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 01:52:27 +0100 Subject: [PATCH 05/28] Add StatefulSet definition --- .../controller/etcd/statefulset.go | 133 +++++++++ .../controller/etcd/statefulset_test.go | 266 ++++++++++++++++++ 2 files changed, 399 insertions(+) create mode 100644 pkg/resource-handler/controller/etcd/statefulset.go create mode 100644 pkg/resource-handler/controller/etcd/statefulset_test.go diff --git a/pkg/resource-handler/controller/etcd/statefulset.go b/pkg/resource-handler/controller/etcd/statefulset.go new file mode 100644 index 00000000..d8e3f7c2 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/statefulset.go @@ -0,0 +1,133 @@ +package etcd + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/storage" +) + +const ( + // ComponentName is the component label value for etcd resources + ComponentName = "etcd" + + // DefaultReplicas is the default number of etcd replicas + DefaultReplicas int32 = 3 + + // DefaultImage is the default etcd container image + DefaultImage = "gcr.io/etcd-development/etcd:v3.5.9" + + // DefaultStorageSize is the default storage size for etcd data + DefaultStorageSize = "10Gi" + + // DataVolumeName is the name of the data volume + DataVolumeName = "data" + + // DataMountPath is the mount path for etcd data + DataMountPath = "/var/lib/etcd" +) + +// BuildStatefulSet creates a StatefulSet for the Etcd cluster. +// Returns a deterministic StatefulSet based on the Etcd spec. +func BuildStatefulSet(etcd *multigresv1alpha1.Etcd, scheme *runtime.Scheme) (*appsv1.StatefulSet, error) { + replicas := DefaultReplicas + // TODO: Debatable whether this defaulting makes sense. 
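+	// An alternative would be defaulting via a +kubebuilder:default marker
+	// on the CRD field, letting the API server fill it in instead of this
+	// builder.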
+ if etcd.Spec.Replicas != nil { + replicas = *etcd.Spec.Replicas + } + + image := DefaultImage + if etcd.Spec.Image != "" { + image = etcd.Spec.Image + } + + headlessServiceName := etcd.Name + "-headless" + labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + podLabels := metadata.MergeLabels(labels, etcd.Spec.PodLabels) + + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: etcd.Name, + Namespace: etcd.Namespace, + Labels: labels, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: headlessServiceName, + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + Annotations: etcd.Spec.PodAnnotations, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: etcd.Spec.ServiceAccountName, + ImagePullSecrets: etcd.Spec.ImagePullSecrets, + Containers: []corev1.Container{ + { + Name: "etcd", + Image: image, + Resources: etcd.Spec.Resources, + Env: buildEtcdEnv(etcd.Name, etcd.Namespace, replicas, headlessServiceName), + Ports: buildContainerPorts(), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + Affinity: etcd.Spec.Affinity, + Tolerations: etcd.Spec.Tolerations, + NodeSelector: etcd.Spec.NodeSelector, + TopologySpreadConstraints: etcd.Spec.TopologySpreadConstraints, + }, + }, + VolumeClaimTemplates: buildVolumeClaimTemplates(etcd), + }, + } + + if err := ctrl.SetControllerReference(etcd, sts, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return sts, nil +} + +// buildVolumeClaimTemplates creates the PVC templates for etcd data storage. +// Caller decides whether to use VolumeClaimTemplate or build from simple fields. 
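+//
+// Precedence: a non-nil Spec.VolumeClaimTemplate is used as-is; otherwise the
+// template is assembled from Spec.StorageClassName and Spec.StorageSize,
+// falling back to DefaultStorageSize.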
+func buildVolumeClaimTemplates(etcd *multigresv1alpha1.Etcd) []corev1.PersistentVolumeClaim { + if etcd.Spec.VolumeClaimTemplate != nil { + return []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: *etcd.Spec.VolumeClaimTemplate, + }, + } + } + + storageSize := DefaultStorageSize + if etcd.Spec.StorageSize != "" { + storageSize = etcd.Spec.StorageSize + } + + return []corev1.PersistentVolumeClaim{ + storage.BuildPVCTemplate(DataVolumeName, etcd.Spec.StorageClassName, storageSize), + } +} diff --git a/pkg/resource-handler/controller/etcd/statefulset_test.go b/pkg/resource-handler/controller/etcd/statefulset_test.go new file mode 100644 index 00000000..ffb14825 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/statefulset_test.go @@ -0,0 +1,266 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func int32Ptr(i int32) *int32 { + return &i +} + +func boolPtr(b bool) *bool { + return &b +} + +func TestBuildStatefulSet(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + scheme *runtime.Scheme + want *appsv1.StatefulSet + wantErr bool + }{ + "minimal spec - all defaults": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: scheme, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-etcd-headless", + Replicas: int32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "etcd", + Image: DefaultImage, + Resources: corev1.ResourceRequirements{}, + Env: buildEtcdEnv("test-etcd", "default", 3, "test-etcd-headless"), + Ports: buildContainerPorts(), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: 
metav1.ObjectMeta{
+								Name: DataVolumeName,
+							},
+							Spec: corev1.PersistentVolumeClaimSpec{
+								AccessModes: []corev1.PersistentVolumeAccessMode{
+									corev1.ReadWriteOnce,
+								},
+								Resources: corev1.VolumeResourceRequirements{
+									Requests: corev1.ResourceList{
+										corev1.ResourceStorage: resource.MustParse(DefaultStorageSize),
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		"custom replicas and image": {
+			etcd: &multigresv1alpha1.Etcd{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "etcd-custom",
+					Namespace: "test",
+					UID:       "custom-uid",
+				},
+				Spec: multigresv1alpha1.EtcdSpec{
+					Replicas: int32Ptr(5),
+					Image:    "quay.io/coreos/etcd:v3.5.15",
+				},
+			},
+			scheme: scheme,
+			want: &appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "etcd-custom",
+					Namespace: "test",
+					Labels: map[string]string{
+						"app.kubernetes.io/name":       "multigres",
+						"app.kubernetes.io/instance":   "etcd-custom",
+						"app.kubernetes.io/component":  "etcd",
+						"app.kubernetes.io/managed-by": "multigres-operator",
+					},
+					OwnerReferences: []metav1.OwnerReference{
+						{
+							APIVersion:         "multigres.com/v1alpha1",
+							Kind:               "Etcd",
+							Name:               "etcd-custom",
+							UID:                "custom-uid",
+							Controller:         boolPtr(true),
+							BlockOwnerDeletion: boolPtr(true),
+						},
+					},
+				},
+				Spec: appsv1.StatefulSetSpec{
+					ServiceName: "etcd-custom-headless",
+					Replicas:    int32Ptr(5),
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"app.kubernetes.io/name":       "multigres",
+							"app.kubernetes.io/instance":   "etcd-custom",
+							"app.kubernetes.io/component":  "etcd",
+							"app.kubernetes.io/managed-by": "multigres-operator",
+						},
+					},
+					PodManagementPolicy: appsv1.ParallelPodManagement,
+					UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
+						Type: appsv1.RollingUpdateStatefulSetStrategyType,
+					},
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{
+								"app.kubernetes.io/name":       "multigres",
+								"app.kubernetes.io/instance":   "etcd-custom",
+								"app.kubernetes.io/component":  "etcd",
+								"app.kubernetes.io/managed-by": "multigres-operator",
+							},
+						},
+						Spec: corev1.PodSpec{
+							Containers: []corev1.Container{
+								{
+									Name:      "etcd",
+									Image:     "quay.io/coreos/etcd:v3.5.15",
+									Resources: corev1.ResourceRequirements{},
+									Env:       buildEtcdEnv("etcd-custom", "test", 5, "etcd-custom-headless"),
+									Ports:     buildContainerPorts(),
+									VolumeMounts: []corev1.VolumeMount{
+										{
+											Name:      DataVolumeName,
+											MountPath: DataMountPath,
+										},
+									},
+								},
+							},
+						},
+					},
+					VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
+						{
+							ObjectMeta: metav1.ObjectMeta{
+								Name: DataVolumeName,
+							},
+							Spec: corev1.PersistentVolumeClaimSpec{
+								AccessModes: []corev1.PersistentVolumeAccessMode{
+									corev1.ReadWriteOnce,
+								},
+								Resources: corev1.VolumeResourceRequirements{
+									Requests: corev1.ResourceList{
+										corev1.ResourceStorage: resource.MustParse(DefaultStorageSize),
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		"nil scheme - should error": {
+			etcd: &multigresv1alpha1.Etcd{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-etcd",
+					Namespace: "default",
+				},
+				Spec: multigresv1alpha1.EtcdSpec{},
+			},
+			scheme:  nil,
+			wantErr: true,
+		},
+		"scheme without Etcd type - should error": {
+			etcd: &multigresv1alpha1.Etcd{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-etcd",
+					Namespace: "default",
+				},
+				Spec: multigresv1alpha1.EtcdSpec{},
+			},
+			scheme:  runtime.NewScheme(), // empty scheme without Etcd type
+			wantErr: true,
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			got, err := BuildStatefulSet(tc.etcd, tc.scheme)
+
+			if (err != nil) != tc.wantErr {
t.Errorf("BuildStatefulSet() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildStatefulSet() mismatch (-want +got):\n%s", diff) + } + }) + } +} From f116ffff53edcfd189a66a323812b4f5090f78df Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 01:57:48 +0100 Subject: [PATCH 06/28] Add service port handling --- pkg/resource-handler/controller/etcd/ports.go | 51 ++++++++ .../controller/etcd/ports_test.go | 111 ++++++++++++++++++ 2 files changed, 162 insertions(+) diff --git a/pkg/resource-handler/controller/etcd/ports.go b/pkg/resource-handler/controller/etcd/ports.go index 6c36d03b..07efdf51 100644 --- a/pkg/resource-handler/controller/etcd/ports.go +++ b/pkg/resource-handler/controller/etcd/ports.go @@ -2,6 +2,7 @@ package etcd import ( corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) const ( @@ -60,3 +61,53 @@ func buildContainerPorts(opts ...PortOption) []corev1.ContainerPort { }, } } + +// buildHeadlessServicePorts creates service ports for the headless service. +// Includes both client and peer ports for StatefulSet pod discovery. +func buildHeadlessServicePorts(opts ...PortOption) []corev1.ServicePort { + options := &portOptions{ + clientPort: ClientPort, + peerPort: PeerPort, + } + + for _, opt := range opts { + opt(options) + } + + return []corev1.ServicePort{ + { + Name: "client", + Port: options.clientPort, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: options.peerPort, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + } +} + +// buildClientServicePorts creates service ports for the client service. +// Only includes the client port for external access. 
+func buildClientServicePorts(opts ...PortOption) []corev1.ServicePort { + options := &portOptions{ + clientPort: ClientPort, + peerPort: PeerPort, + } + + for _, opt := range opts { + opt(options) + } + + return []corev1.ServicePort{ + { + Name: "client", + Port: options.clientPort, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + } +} diff --git a/pkg/resource-handler/controller/etcd/ports_test.go b/pkg/resource-handler/controller/etcd/ports_test.go index 1fa96123..961e2701 100644 --- a/pkg/resource-handler/controller/etcd/ports_test.go +++ b/pkg/resource-handler/controller/etcd/ports_test.go @@ -5,6 +5,7 @@ import ( "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) func TestBuildContainerPorts(t *testing.T) { @@ -104,3 +105,113 @@ func TestBuildContainerPorts(t *testing.T) { }) } } + +func TestBuildHeadlessServicePorts(t *testing.T) { + tests := map[string]struct { + opts []PortOption + want []corev1.ServicePort + }{ + "default ports": { + opts: nil, + want: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + "custom client port": { + opts: []PortOption{WithClientPort(3379)}, + want: []corev1.ServicePort{ + { + Name: "client", + Port: 3379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + "both ports customized": { + opts: []PortOption{ + WithClientPort(9379), + WithPeerPort(9380), + }, + want: []corev1.ServicePort{ + { + Name: "client", + Port: 9379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 9380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildHeadlessServicePorts(tc.opts...) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildHeadlessServicePorts() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildClientServicePorts(t *testing.T) { + tests := map[string]struct { + opts []PortOption + want []corev1.ServicePort + }{ + "default port": { + opts: nil, + want: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + "custom client port": { + opts: []PortOption{WithClientPort(3379)}, + want: []corev1.ServicePort{ + { + Name: "client", + Port: 3379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildClientServicePorts(tc.opts...) 
+ if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildClientServicePorts() mismatch (-want +got):\n%s", diff) + } + }) + } +} From 00d821ca0038864fb5669a358e267a66abdccbf0 Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 02:17:27 +0100 Subject: [PATCH 07/28] Simplify port logic to use etcd obj --- pkg/resource-handler/controller/etcd/ports.go | 91 +++++------ .../controller/etcd/ports_test.go | 147 ++++-------------- .../controller/etcd/statefulset.go | 2 +- 3 files changed, 64 insertions(+), 176 deletions(-) diff --git a/pkg/resource-handler/controller/etcd/ports.go b/pkg/resource-handler/controller/etcd/ports.go index 07efdf51..491b8f20 100644 --- a/pkg/resource-handler/controller/etcd/ports.go +++ b/pkg/resource-handler/controller/etcd/ports.go @@ -3,6 +3,8 @@ package etcd import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" ) const ( @@ -13,50 +15,29 @@ const ( PeerPort = 2380 ) -// PortOption configures port settings for etcd containers. -type PortOption func(*portOptions) - -type portOptions struct { - clientPort int32 - peerPort int32 -} - -// WithClientPort overrides the default client port. -func WithClientPort(port int32) PortOption { - return func(o *portOptions) { - o.clientPort = port - } -} - -// WithPeerPort overrides the default peer port. -func WithPeerPort(port int32) PortOption { - return func(o *portOptions) { - o.peerPort = port - } -} - // buildContainerPorts creates the port definitions for the etcd container. -// These ports are used in both the StatefulSet container spec and Service -// definitions. -func buildContainerPorts(opts ...PortOption) []corev1.ContainerPort { - options := &portOptions{ - clientPort: ClientPort, - peerPort: PeerPort, - } - - for _, opt := range opts { - opt(options) - } +// Uses default ports since EtcdSpec doesn't have port configuration yet. +func buildContainerPorts(etcd *multigresv1alpha1.Etcd) []corev1.ContainerPort { + clientPort := ClientPort + peerPort := PeerPort + + // TODO: When EtcdSpec has port fields, use them: + // if etcd.Spec.ClientPort != 0 { + // clientPort = etcd.Spec.ClientPort + // } + // if etcd.Spec.PeerPort != 0 { + // peerPort = etcd.Spec.PeerPort + // } return []corev1.ContainerPort{ { Name: "client", - ContainerPort: options.clientPort, + ContainerPort: clientPort, Protocol: corev1.ProtocolTCP, }, { Name: "peer", - ContainerPort: options.peerPort, + ContainerPort: peerPort, Protocol: corev1.ProtocolTCP, }, } @@ -64,26 +45,28 @@ func buildContainerPorts(opts ...PortOption) []corev1.ContainerPort { // buildHeadlessServicePorts creates service ports for the headless service. // Includes both client and peer ports for StatefulSet pod discovery. 
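+// The generated ports use named target ports ("client" and "peer"), which
+// resolve against the container port names, so the Service keeps routing
+// correctly even if the container port numbers change.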
-func buildHeadlessServicePorts(opts ...PortOption) []corev1.ServicePort { - options := &portOptions{ - clientPort: ClientPort, - peerPort: PeerPort, - } - - for _, opt := range opts { - opt(options) - } +func buildHeadlessServicePorts(etcd *multigresv1alpha1.Etcd) []corev1.ServicePort { + clientPort := ClientPort + peerPort := PeerPort + + // TODO: When EtcdSpec has port fields, use them: + // if etcd.Spec.ClientPort != 0 { + // clientPort = etcd.Spec.ClientPort + // } + // if etcd.Spec.PeerPort != 0 { + // peerPort = etcd.Spec.PeerPort + // } return []corev1.ServicePort{ { Name: "client", - Port: options.clientPort, + Port: clientPort, TargetPort: intstr.FromString("client"), Protocol: corev1.ProtocolTCP, }, { Name: "peer", - Port: options.peerPort, + Port: peerPort, TargetPort: intstr.FromString("peer"), Protocol: corev1.ProtocolTCP, }, @@ -92,20 +75,18 @@ func buildHeadlessServicePorts(opts ...PortOption) []corev1.ServicePort { // buildClientServicePorts creates service ports for the client service. // Only includes the client port for external access. -func buildClientServicePorts(opts ...PortOption) []corev1.ServicePort { - options := &portOptions{ - clientPort: ClientPort, - peerPort: PeerPort, - } +func buildClientServicePorts(etcd *multigresv1alpha1.Etcd) []corev1.ServicePort { + clientPort := ClientPort - for _, opt := range opts { - opt(options) - } + // TODO: When EtcdSpec has clientPort field, use it: + // if etcd.Spec.ClientPort != 0 { + // clientPort = etcd.Spec.ClientPort + // } return []corev1.ServicePort{ { Name: "client", - Port: options.clientPort, + Port: clientPort, TargetPort: intstr.FromString("client"), Protocol: corev1.ProtocolTCP, }, diff --git a/pkg/resource-handler/controller/etcd/ports_test.go b/pkg/resource-handler/controller/etcd/ports_test.go index 961e2701..73475a2b 100644 --- a/pkg/resource-handler/controller/etcd/ports_test.go +++ b/pkg/resource-handler/controller/etcd/ports_test.go @@ -5,46 +5,25 @@ import ( "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" ) func TestBuildContainerPorts(t *testing.T) { tests := map[string]struct { - opts []PortOption + etcd *multigresv1alpha1.Etcd want []corev1.ContainerPort }{ "default ports": { - opts: nil, - want: []corev1.ContainerPort{ - { - Name: "client", - ContainerPort: 2379, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - ContainerPort: 2380, - Protocol: corev1.ProtocolTCP, + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", }, + Spec: multigresv1alpha1.EtcdSpec{}, }, - }, - "custom client port": { - opts: []PortOption{WithClientPort(3379)}, - want: []corev1.ContainerPort{ - { - Name: "client", - ContainerPort: 3379, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - ContainerPort: 2380, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - "custom peer port": { - opts: []PortOption{WithPeerPort(3380)}, want: []corev1.ContainerPort{ { Name: "client", @@ -53,43 +32,7 @@ func TestBuildContainerPorts(t *testing.T) { }, { Name: "peer", - ContainerPort: 3380, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - "both ports customized": { - opts: []PortOption{ - WithClientPort(9379), - WithPeerPort(9380), - }, - want: []corev1.ContainerPort{ - { - Name: "client", - ContainerPort: 9379, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - ContainerPort: 9380, - Protocol: 
corev1.ProtocolTCP, - }, - }, - }, - "zero port values - should use zero": { - opts: []PortOption{ - WithClientPort(0), - WithPeerPort(0), - }, - want: []corev1.ContainerPort{ - { - Name: "client", - ContainerPort: 0, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - ContainerPort: 0, + ContainerPort: 2380, Protocol: corev1.ProtocolTCP, }, }, @@ -98,7 +41,7 @@ func TestBuildContainerPorts(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildContainerPorts(tc.opts...) + got := buildContainerPorts(tc.etcd) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("buildContainerPorts() mismatch (-want +got):\n%s", diff) } @@ -108,32 +51,21 @@ func TestBuildContainerPorts(t *testing.T) { func TestBuildHeadlessServicePorts(t *testing.T) { tests := map[string]struct { - opts []PortOption + etcd *multigresv1alpha1.Etcd want []corev1.ServicePort }{ "default ports": { - opts: nil, - want: []corev1.ServicePort{ - { - Name: "client", - Port: 2379, - TargetPort: intstr.FromString("client"), - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - Port: 2380, - TargetPort: intstr.FromString("peer"), - Protocol: corev1.ProtocolTCP, + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", }, + Spec: multigresv1alpha1.EtcdSpec{}, }, - }, - "custom client port": { - opts: []PortOption{WithClientPort(3379)}, want: []corev1.ServicePort{ { Name: "client", - Port: 3379, + Port: 2379, TargetPort: intstr.FromString("client"), Protocol: corev1.ProtocolTCP, }, @@ -145,31 +77,11 @@ func TestBuildHeadlessServicePorts(t *testing.T) { }, }, }, - "both ports customized": { - opts: []PortOption{ - WithClientPort(9379), - WithPeerPort(9380), - }, - want: []corev1.ServicePort{ - { - Name: "client", - Port: 9379, - TargetPort: intstr.FromString("client"), - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - Port: 9380, - TargetPort: intstr.FromString("peer"), - Protocol: corev1.ProtocolTCP, - }, - }, - }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildHeadlessServicePorts(tc.opts...) + got := buildHeadlessServicePorts(tc.etcd) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("buildHeadlessServicePorts() mismatch (-want +got):\n%s", diff) } @@ -179,26 +91,21 @@ func TestBuildHeadlessServicePorts(t *testing.T) { func TestBuildClientServicePorts(t *testing.T) { tests := map[string]struct { - opts []PortOption + etcd *multigresv1alpha1.Etcd want []corev1.ServicePort }{ "default port": { - opts: nil, - want: []corev1.ServicePort{ - { - Name: "client", - Port: 2379, - TargetPort: intstr.FromString("client"), - Protocol: corev1.ProtocolTCP, + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", }, + Spec: multigresv1alpha1.EtcdSpec{}, }, - }, - "custom client port": { - opts: []PortOption{WithClientPort(3379)}, want: []corev1.ServicePort{ { Name: "client", - Port: 3379, + Port: 2379, TargetPort: intstr.FromString("client"), Protocol: corev1.ProtocolTCP, }, @@ -208,7 +115,7 @@ func TestBuildClientServicePorts(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildClientServicePorts(tc.opts...) 
+ got := buildClientServicePorts(tc.etcd) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("buildClientServicePorts() mismatch (-want +got):\n%s", diff) } diff --git a/pkg/resource-handler/controller/etcd/statefulset.go b/pkg/resource-handler/controller/etcd/statefulset.go index d8e3f7c2..281126a3 100644 --- a/pkg/resource-handler/controller/etcd/statefulset.go +++ b/pkg/resource-handler/controller/etcd/statefulset.go @@ -82,7 +82,7 @@ func BuildStatefulSet(etcd *multigresv1alpha1.Etcd, scheme *runtime.Scheme) (*ap Image: image, Resources: etcd.Spec.Resources, Env: buildEtcdEnv(etcd.Name, etcd.Namespace, replicas, headlessServiceName), - Ports: buildContainerPorts(), + Ports: buildContainerPorts(etcd), VolumeMounts: []corev1.VolumeMount{ { Name: DataVolumeName, From 92b6e99c75aca7095bc7bf4e0a516dd49d2bfdfb Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 02:17:52 +0100 Subject: [PATCH 08/28] Add service handling --- .../controller/etcd/service.go | 64 ++++ .../controller/etcd/service_test.go | 321 ++++++++++++++++++ 2 files changed, 385 insertions(+) create mode 100644 pkg/resource-handler/controller/etcd/service.go create mode 100644 pkg/resource-handler/controller/etcd/service_test.go diff --git a/pkg/resource-handler/controller/etcd/service.go b/pkg/resource-handler/controller/etcd/service.go new file mode 100644 index 00000000..0486198f --- /dev/null +++ b/pkg/resource-handler/controller/etcd/service.go @@ -0,0 +1,64 @@ +package etcd + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" +) + +// BuildHeadlessService creates a headless Service for the Etcd StatefulSet. +// Headless services are required for StatefulSet pod DNS records. +func BuildHeadlessService(etcd *multigresv1alpha1.Etcd, scheme *runtime.Scheme) (*corev1.Service, error) { + labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: etcd.Name + "-headless", + Namespace: etcd.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: labels, + Ports: buildHeadlessServicePorts(etcd), + PublishNotReadyAddresses: true, + }, + } + + if err := ctrl.SetControllerReference(etcd, svc, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return svc, nil +} + +// BuildClientService creates a client Service for external access to Etcd. +// This service load balances across all etcd members. 
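+// It selects the same labels as the StatefulSet pods, so the ClusterIP
+// spreads connections across all ready etcd members.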
+func BuildClientService(etcd *multigresv1alpha1.Etcd, scheme *runtime.Scheme) (*corev1.Service, error) { + labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: etcd.Name, + Namespace: etcd.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: labels, + Ports: buildClientServicePorts(etcd), + }, + } + + if err := ctrl.SetControllerReference(etcd, svc, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return svc, nil +} diff --git a/pkg/resource-handler/controller/etcd/service_test.go b/pkg/resource-handler/controller/etcd/service_test.go new file mode 100644 index 00000000..ccbe4de6 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/service_test.go @@ -0,0 +1,321 @@ +package etcd + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildHeadlessService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-headless", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + "with cellName": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone1", + Namespace: "production", + UID: "zone1-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + CellName: "zone1", + }, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone1-headless", + Namespace: "production", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone1", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone1", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: 
"etcd-zone1", + UID: "zone1-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone1", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone1", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + "nil scheme - should error": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: nil, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildHeadlessService(tc.etcd, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildHeadlessService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildHeadlessService() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildClientService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + "with cellName": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone2", + Namespace: "production", + UID: "zone2-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + CellName: "zone2", + }, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone2", + Namespace: "production", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone2", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone2", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: 
"multigres.com/v1alpha1", + Kind: "Etcd", + Name: "etcd-zone2", + UID: "zone2-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-zone2", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone2", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + "nil scheme - should error": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + scheme: nil, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildClientService(tc.etcd, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildClientService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildClientService() mismatch (-want +got):\n%s", diff) + } + }) + } +} From d372434ee3ce4fc163e3bcedeac76ead0301b010 Mon Sep 17 00:00:00 2001 From: Ryota Date: Sun, 12 Oct 2025 03:00:22 +0100 Subject: [PATCH 09/28] Add controller logic This needs further testing and is breaking in local development due to the API spec mismatch, which is expected as all the API updates have not made to the main branch yet. --- .../controller/etcd/etcd_controller.go | 266 +++++++++++++++ .../controller/etcd/etcd_controller_test.go | 319 ++++++++++++++++++ 2 files changed, 585 insertions(+) create mode 100644 pkg/resource-handler/controller/etcd/etcd_controller.go create mode 100644 pkg/resource-handler/controller/etcd/etcd_controller_test.go diff --git a/pkg/resource-handler/controller/etcd/etcd_controller.go b/pkg/resource-handler/controller/etcd/etcd_controller.go new file mode 100644 index 00000000..826a0103 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_controller.go @@ -0,0 +1,266 @@ +package etcd + +import ( + "context" + "fmt" + "slices" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + finalizerName = "etcd.multigres.com/finalizer" +) + +// EtcdReconciler reconciles an Etcd object. +type EtcdReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=multigres.com,resources=etcds,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=multigres.com,resources=etcds/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=multigres.com,resources=etcds/finalizers,verbs=update +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete + +// Reconcile handles Etcd resource reconciliation. 
+func (r *EtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Fetch the Etcd instance + etcd := &multigresv1alpha1.Etcd{} + if err := r.Get(ctx, req.NamespacedName, etcd); err != nil { + if errors.IsNotFound(err) { + logger.Info("Etcd resource not found, ignoring") + return ctrl.Result{}, nil + } + logger.Error(err, "Failed to get Etcd") + return ctrl.Result{}, err + } + + // Handle deletion + if !etcd.DeletionTimestamp.IsZero() { + return r.handleDeletion(ctx, etcd) + } + + // Add finalizer if not present + if !slices.Contains(etcd.Finalizers, finalizerName) { + etcd.Finalizers = append(etcd.Finalizers, finalizerName) + if err := r.Update(ctx, etcd); err != nil { + logger.Error(err, "Failed to add finalizer") + return ctrl.Result{}, err + } + } + + // Reconcile StatefulSet + if err := r.reconcileStatefulSet(ctx, etcd); err != nil { + logger.Error(err, "Failed to reconcile StatefulSet") + return ctrl.Result{}, err + } + + // Reconcile headless Service + if err := r.reconcileHeadlessService(ctx, etcd); err != nil { + logger.Error(err, "Failed to reconcile headless Service") + return ctrl.Result{}, err + } + + // Reconcile client Service + if err := r.reconcileClientService(ctx, etcd); err != nil { + logger.Error(err, "Failed to reconcile client Service") + return ctrl.Result{}, err + } + + // Update status + if err := r.updateStatus(ctx, etcd); err != nil { + logger.Error(err, "Failed to update status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// handleDeletion handles cleanup when Etcd is being deleted. +func (r *EtcdReconciler) handleDeletion(ctx context.Context, etcd *multigresv1alpha1.Etcd) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + if slices.Contains(etcd.Finalizers, finalizerName) { + // Perform cleanup if needed + // Currently no special cleanup required - owner references handle resource deletion + + // Remove finalizer + etcd.Finalizers = slices.DeleteFunc(etcd.Finalizers, func(s string) bool { + return s == finalizerName + }) + if err := r.Update(ctx, etcd); err != nil { + logger.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +// reconcileStatefulSet creates or updates the StatefulSet for Etcd. +func (r *EtcdReconciler) reconcileStatefulSet(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + desired, err := BuildStatefulSet(etcd, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build StatefulSet: %w", err) + } + + existing := &appsv1.StatefulSet{} + err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new StatefulSet + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create StatefulSet: %w", err) + } + return nil + } + return fmt.Errorf("failed to get StatefulSet: %w", err) + } + + // Update existing StatefulSet + existing.Spec = desired.Spec + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update StatefulSet: %w", err) + } + + return nil +} + +// reconcileHeadlessService creates or updates the headless Service for Etcd. 
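+// On update, only Ports, Selector, and Labels are copied onto the existing
+// object, so server-defaulted and immutable fields such as the ClusterIP
+// (None for a headless service) are preserved.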
+func (r *EtcdReconciler) reconcileHeadlessService(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + desired, err := BuildHeadlessService(etcd, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build headless Service: %w", err) + } + + existing := &corev1.Service{} + err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name + "-headless"}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new Service + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create headless Service: %w", err) + } + return nil + } + return fmt.Errorf("failed to get headless Service: %w", err) + } + + // Update existing Service + existing.Spec.Ports = desired.Spec.Ports + existing.Spec.Selector = desired.Spec.Selector + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update headless Service: %w", err) + } + + return nil +} + +// reconcileClientService creates or updates the client Service for Etcd. +func (r *EtcdReconciler) reconcileClientService(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + desired, err := BuildClientService(etcd, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build client Service: %w", err) + } + + existing := &corev1.Service{} + err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new Service + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create client Service: %w", err) + } + return nil + } + return fmt.Errorf("failed to get client Service: %w", err) + } + + // Update existing Service + existing.Spec.Ports = desired.Spec.Ports + existing.Spec.Selector = desired.Spec.Selector + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update client Service: %w", err) + } + + return nil +} + +// updateStatus updates the Etcd status based on observed state. +func (r *EtcdReconciler) updateStatus(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { + // Get the StatefulSet to check status + sts := &appsv1.StatefulSet{} + err := r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, sts) + if err != nil { + if errors.IsNotFound(err) { + // StatefulSet not created yet + return nil + } + return fmt.Errorf("failed to get StatefulSet for status: %w", err) + } + + // Update status fields + etcd.Status.Replicas = sts.Status.Replicas + etcd.Status.ReadyReplicas = sts.Status.ReadyReplicas + etcd.Status.Ready = sts.Status.ReadyReplicas == sts.Status.Replicas && sts.Status.Replicas > 0 + etcd.Status.ObservedGeneration = etcd.Generation + + // Update conditions + etcd.Status.Conditions = r.buildConditions(etcd, sts) + + if err := r.Status().Update(ctx, etcd); err != nil { + return fmt.Errorf("failed to update status: %w", err) + } + + return nil +} + +// buildConditions creates status conditions based on observed state. 
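+// Currently a single "Ready" condition is reported: True only when every
+// desired replica is ready and at least one replica exists.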
+func (r *EtcdReconciler) buildConditions(etcd *multigresv1alpha1.Etcd, sts *appsv1.StatefulSet) []metav1.Condition { + conditions := []metav1.Condition{} + + // Ready condition + readyCondition := metav1.Condition{ + Type: "Ready", + ObservedGeneration: etcd.Generation, + LastTransitionTime: metav1.Now(), + } + + if sts.Status.ReadyReplicas == sts.Status.Replicas && sts.Status.Replicas > 0 { + readyCondition.Status = metav1.ConditionTrue + readyCondition.Reason = "AllReplicasReady" + readyCondition.Message = fmt.Sprintf("All %d replicas are ready", sts.Status.ReadyReplicas) + } else { + readyCondition.Status = metav1.ConditionFalse + readyCondition.Reason = "NotAllReplicasReady" + readyCondition.Message = fmt.Sprintf("%d/%d replicas ready", sts.Status.ReadyReplicas, sts.Status.Replicas) + } + + conditions = append(conditions, readyCondition) + return conditions +} + +// SetupWithManager sets up the controller with the Manager. +func (r *EtcdReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&multigresv1alpha1.Etcd{}). + Owns(&appsv1.StatefulSet{}). + Owns(&corev1.Service{}). + Complete(r) +} diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go new file mode 100644 index 00000000..25ab78ee --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -0,0 +1,319 @@ +package etcd + +import ( + "context" + "slices" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestEtcdReconciler_Reconcile(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + existingObjects []client.Object + wantStatefulSet bool + wantHeadlessService bool + wantClientService bool + wantFinalizer bool + wantErr bool + }{ + "create all resources for new Etcd": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + wantStatefulSet: true, + wantHeadlessService: true, + wantClientService: true, + wantFinalizer: true, + }, + "update existing resources": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(5), + }, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: int32Ptr(3), // old value + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd-headless", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-etcd", + Namespace: "default", + }, + }, + }, + wantStatefulSet: true, + wantHeadlessService: true, + wantClientService: true, + wantFinalizer: true, + }, + "etcd with cellName": { + etcd: &multigresv1alpha1.Etcd{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-zone1", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{ + CellName: "zone1", + }, + }, + existingObjects: []client.Object{}, + wantStatefulSet: true, + wantHeadlessService: true, + wantClientService: true, + wantFinalizer: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + // Create fake client with existing objects + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tc.existingObjects...). + WithStatusSubresource(&multigresv1alpha1.Etcd{}). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Create the Etcd resource if not in existing objects + etcdInExisting := false + for _, obj := range tc.existingObjects { + if etcd, ok := obj.(*multigresv1alpha1.Etcd); ok && etcd.Name == tc.etcd.Name { + etcdInExisting = true + break + } + } + if !etcdInExisting { + err := fakeClient.Create(context.Background(), tc.etcd) + if err != nil { + t.Fatalf("Failed to create Etcd: %v", err) + } + } + + // Reconcile + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, + } + + _, err := reconciler.Reconcile(context.Background(), req) + if (err != nil) != tc.wantErr { + t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + // Verify StatefulSet was created + if tc.wantStatefulSet { + sts := &appsv1.StatefulSet{} + err := fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, sts) + if err != nil { + t.Errorf("Expected StatefulSet to exist, got error: %v", err) + } + } + + // Verify headless Service was created + if tc.wantHeadlessService { + svc := &corev1.Service{} + err := fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name + "-headless", + Namespace: tc.etcd.Namespace, + }, svc) + if err != nil { + t.Errorf("Expected headless Service to exist, got error: %v", err) + } + } + + // Verify client Service was created + if tc.wantClientService { + svc := &corev1.Service{} + err := fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, svc) + if err != nil { + t.Errorf("Expected client Service to exist, got error: %v", err) + } + } + + // Verify finalizer was added + if tc.wantFinalizer { + etcd := &multigresv1alpha1.Etcd{} + err := fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, etcd) + if err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + if !slices.Contains(etcd.Finalizers, finalizerName) { + t.Errorf("Expected finalizer %s to be present", finalizerName) + } + } + }) + } +} + +func TestEtcdReconciler_HandleDeletion(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + now := metav1.Now() + + tests := map[string]struct { + etcd *multigresv1alpha1.Etcd + wantFinalizerRemoved bool + }{ + "remove finalizer on deletion": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + DeletionTimestamp: &now, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + wantFinalizerRemoved: true, + }, + "no finalizer to remove": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", 
+				Namespace:         "default",
+				DeletionTimestamp: &now,
+				Finalizers:        []string{},
+			},
+			Spec: multigresv1alpha1.EtcdSpec{},
+		},
+		wantFinalizerRemoved: false,
+	},
+}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			fakeClient := fake.NewClientBuilder().
+				WithScheme(scheme).
+				WithObjects(tc.etcd).
+				WithStatusSubresource(&multigresv1alpha1.Etcd{}).
+				Build()
+
+			reconciler := &EtcdReconciler{
+				Client: fakeClient,
+				Scheme: scheme,
+			}
+
+			req := ctrl.Request{
+				NamespacedName: types.NamespacedName{
+					Name:      tc.etcd.Name,
+					Namespace: tc.etcd.Namespace,
+				},
+			}
+
+			_, err := reconciler.Reconcile(context.Background(), req)
+			if err != nil {
+				t.Fatalf("Reconcile() unexpected error = %v", err)
+			}
+
+			// Verify finalizer state
+			etcd := &multigresv1alpha1.Etcd{}
+			err = fakeClient.Get(context.Background(), types.NamespacedName{
+				Name:      tc.etcd.Name,
+				Namespace: tc.etcd.Namespace,
+			}, etcd)
+			if err != nil {
+				t.Fatalf("Failed to get Etcd: %v", err)
+			}
+
+			hasFinalizer := slices.Contains(etcd.Finalizers, finalizerName)
+			if tc.wantFinalizerRemoved && hasFinalizer {
+				t.Errorf("Expected finalizer to be removed, but it's still present")
+			}
+			if !tc.wantFinalizerRemoved && len(tc.etcd.Finalizers) > 0 && !hasFinalizer {
+				t.Errorf("Expected finalizer to be present, but it's removed")
+			}
+		})
+	}
+}
+
+func TestEtcdReconciler_ReconcileNotFound(t *testing.T) {
+	scheme := runtime.NewScheme()
+	_ = multigresv1alpha1.AddToScheme(scheme)
+
+	fakeClient := fake.NewClientBuilder().
+		WithScheme(scheme).
+		Build()
+
+	reconciler := &EtcdReconciler{
+		Client: fakeClient,
+		Scheme: scheme,
+	}
+
+	req := ctrl.Request{
+		NamespacedName: types.NamespacedName{
+			Name:      "nonexistent-etcd",
+			Namespace: "default",
+		},
+	}
+
+	result, err := reconciler.Reconcile(context.Background(), req)
+	if err != nil {
+		t.Errorf("Reconcile() should not error on NotFound, got: %v", err)
+	}
+
+	if result != (ctrl.Result{}) {
+		t.Errorf("Reconcile() should return empty Result on NotFound, got: %v", result)
+	}
+}
From a71e22a30851e67c294990a8ea697559ad620f10 Mon Sep 17 00:00:00 2001
From: Ryota
Date: Sun, 12 Oct 2025 12:53:05 +0100
Subject: [PATCH 10/28] Fix port types and panic-based scheme tests
---
 pkg/resource-handler/controller/etcd/ports.go |  4 ++--
 .../controller/etcd/service_test.go           | 22 -------------------
 .../controller/etcd/statefulset_test.go       | 16 ++------------
 3 files changed, 4 insertions(+), 38 deletions(-)

diff --git a/pkg/resource-handler/controller/etcd/ports.go b/pkg/resource-handler/controller/etcd/ports.go
index 491b8f20..8d77e880 100644
--- a/pkg/resource-handler/controller/etcd/ports.go
+++ b/pkg/resource-handler/controller/etcd/ports.go
@@ -9,10 +9,10 @@ import (
 
 const (
 	// ClientPort is the default port for etcd client connections.
-	ClientPort = 2379
+	ClientPort int32 = 2379
 
 	// PeerPort is the default port for etcd peer connections.
-	PeerPort = 2380
+	PeerPort int32 = 2380
 )
 
 // buildContainerPorts creates the port definitions for the etcd container.
diff --git a/pkg/resource-handler/controller/etcd/service_test.go b/pkg/resource-handler/controller/etcd/service_test.go index ccbe4de6..0d3596d5 100644 --- a/pkg/resource-handler/controller/etcd/service_test.go +++ b/pkg/resource-handler/controller/etcd/service_test.go @@ -140,17 +140,6 @@ func TestBuildHeadlessService(t *testing.T) { }, }, }, - "nil scheme - should error": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - scheme: nil, - wantErr: true, - }, } for name, tc := range tests { @@ -287,17 +276,6 @@ func TestBuildClientService(t *testing.T) { }, }, }, - "nil scheme - should error": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - scheme: nil, - wantErr: true, - }, } for name, tc := range tests { diff --git a/pkg/resource-handler/controller/etcd/statefulset_test.go b/pkg/resource-handler/controller/etcd/statefulset_test.go index ffb14825..365ea743 100644 --- a/pkg/resource-handler/controller/etcd/statefulset_test.go +++ b/pkg/resource-handler/controller/etcd/statefulset_test.go @@ -93,7 +93,7 @@ func TestBuildStatefulSet(t *testing.T) { Image: DefaultImage, Resources: corev1.ResourceRequirements{}, Env: buildEtcdEnv("test-etcd", "default", 3, "test-etcd-headless"), - Ports: buildContainerPorts(), + Ports: buildContainerPorts(nil), // Default VolumeMounts: []corev1.VolumeMount{ { Name: DataVolumeName, @@ -189,7 +189,7 @@ func TestBuildStatefulSet(t *testing.T) { Image: "quay.io/coreos/etcd:v3.5.15", Resources: corev1.ResourceRequirements{}, Env: buildEtcdEnv("etcd-custom", "test", 5, "etcd-custom-headless"), - Ports: buildContainerPorts(), + Ports: buildContainerPorts(nil), VolumeMounts: []corev1.VolumeMount{ { Name: DataVolumeName, @@ -219,18 +219,6 @@ func TestBuildStatefulSet(t *testing.T) { }, }, }, - scheme: scheme, - }, - "nil scheme - should error": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - scheme: nil, - wantErr: true, }, "scheme without Etcd type - should error": { etcd: &multigresv1alpha1.Etcd{ From 046817ce88382a4e7b9c7d102811be2063edceba Mon Sep 17 00:00:00 2001 From: Ryota Date: Mon, 13 Oct 2025 02:52:59 +0100 Subject: [PATCH 11/28] Fix standard labels to match --- pkg/resource-handler/controller/etcd/service_test.go | 12 ++++++++++++ .../controller/etcd/statefulset_test.go | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/pkg/resource-handler/controller/etcd/service_test.go b/pkg/resource-handler/controller/etcd/service_test.go index 0d3596d5..9a9ca6dd 100644 --- a/pkg/resource-handler/controller/etcd/service_test.go +++ b/pkg/resource-handler/controller/etcd/service_test.go @@ -40,7 +40,9 @@ func TestBuildHeadlessService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "test-etcd", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, OwnerReferences: []metav1.OwnerReference{ { @@ -59,7 +61,9 @@ func TestBuildHeadlessService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "test-etcd", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": 
"multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, Ports: []corev1.ServicePort{ { @@ -99,6 +103,7 @@ func TestBuildHeadlessService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "etcd-zone1", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", "multigres.com/cell": "zone1", }, @@ -119,6 +124,7 @@ func TestBuildHeadlessService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "etcd-zone1", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", "multigres.com/cell": "zone1", }, @@ -190,7 +196,9 @@ func TestBuildClientService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "test-etcd", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, OwnerReferences: []metav1.OwnerReference{ { @@ -209,7 +217,9 @@ func TestBuildClientService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "test-etcd", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, Ports: []corev1.ServicePort{ { @@ -242,6 +252,7 @@ func TestBuildClientService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "etcd-zone2", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", "multigres.com/cell": "zone2", }, @@ -262,6 +273,7 @@ func TestBuildClientService(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "etcd-zone2", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", "multigres.com/cell": "zone2", }, diff --git a/pkg/resource-handler/controller/etcd/statefulset_test.go b/pkg/resource-handler/controller/etcd/statefulset_test.go index 365ea743..1cd6a8e2 100644 --- a/pkg/resource-handler/controller/etcd/statefulset_test.go +++ b/pkg/resource-handler/controller/etcd/statefulset_test.go @@ -49,7 +49,9 @@ func TestBuildStatefulSet(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "test-etcd", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, OwnerReferences: []metav1.OwnerReference{ { @@ -70,7 +72,9 @@ func TestBuildStatefulSet(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "test-etcd", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, }, PodManagementPolicy: appsv1.ParallelPodManagement, @@ -83,7 +87,9 @@ func TestBuildStatefulSet(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "test-etcd", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, }, Spec: corev1.PodSpec{ @@ -145,7 +151,9 @@ func 
TestBuildStatefulSet(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "etcd-custom", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, OwnerReferences: []metav1.OwnerReference{ { @@ -166,7 +174,9 @@ func TestBuildStatefulSet(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "etcd-custom", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, }, PodManagementPolicy: appsv1.ParallelPodManagement, @@ -179,7 +189,9 @@ func TestBuildStatefulSet(t *testing.T) { "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "etcd-custom", "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", }, }, Spec: corev1.PodSpec{ From 30619500658fe485b4f7497d525c39957ab82547 Mon Sep 17 00:00:00 2001 From: Ryota Date: Mon, 13 Oct 2025 02:23:08 +0100 Subject: [PATCH 12/28] Add fake client wrapper for failure test --- .../controller/testutil/fake_client.go | 220 +++++++ .../controller/testutil/fake_client_test.go | 602 ++++++++++++++++++ 2 files changed, 822 insertions(+) create mode 100644 pkg/resource-handler/controller/testutil/fake_client.go create mode 100644 pkg/resource-handler/controller/testutil/fake_client_test.go diff --git a/pkg/resource-handler/controller/testutil/fake_client.go b/pkg/resource-handler/controller/testutil/fake_client.go new file mode 100644 index 00000000..f0582739 --- /dev/null +++ b/pkg/resource-handler/controller/testutil/fake_client.go @@ -0,0 +1,220 @@ +// Package testutil provides testing utilities for controller tests. +package testutil + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// FailureConfig configures when the fake client should return errors. +// Each field is a function that receives the object/key and returns an error if the operation should fail. +type FailureConfig struct { + // OnGet is called before Get operations. Return non-nil to fail the operation. + OnGet func(key client.ObjectKey) error + + // OnList is called before List operations. Return non-nil to fail the operation. + OnList func(list client.ObjectList) error + + // OnCreate is called before Create operations. Return non-nil to fail the operation. + OnCreate func(obj client.Object) error + + // OnUpdate is called before Update operations. Return non-nil to fail the operation. + OnUpdate func(obj client.Object) error + + // OnPatch is called before Patch operations. Return non-nil to fail the operation. + OnPatch func(obj client.Object) error + + // OnDelete is called before Delete operations. Return non-nil to fail the operation. + OnDelete func(obj client.Object) error + + // OnDeleteAllOf is called before DeleteAllOf operations. Return non-nil to fail the operation. + OnDeleteAllOf func(obj client.Object) error + + // OnStatusUpdate is called before Status().Update() operations. Return non-nil to fail the operation. + OnStatusUpdate func(obj client.Object) error + + // OnStatusPatch is called before Status().Patch() operations. Return non-nil to fail the operation. 
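+	// The hook receives the object whose status subresource would be patched.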
+ OnStatusPatch func(obj client.Object) error +} + +// fakeClientWithFailures wraps a real fake client and injects failures based on configuration. +type fakeClientWithFailures struct { + client.Client + config *FailureConfig +} + +// NewFakeClientWithFailures creates a fake client that can be configured to fail operations. +// This is useful for testing error handling paths in controllers. +func NewFakeClientWithFailures(baseClient client.Client, config *FailureConfig) client.Client { + if config == nil { + config = &FailureConfig{} + } + return &fakeClientWithFailures{ + Client: baseClient, + config: config, + } +} + +func (c *fakeClientWithFailures) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if c.config.OnGet != nil { + if err := c.config.OnGet(key); err != nil { + return err + } + } + return c.Client.Get(ctx, key, obj, opts...) +} + +func (c *fakeClientWithFailures) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if c.config.OnList != nil { + if err := c.config.OnList(list); err != nil { + return err + } + } + return c.Client.List(ctx, list, opts...) +} + +func (c *fakeClientWithFailures) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if c.config.OnCreate != nil { + if err := c.config.OnCreate(obj); err != nil { + return err + } + } + return c.Client.Create(ctx, obj, opts...) +} + +func (c *fakeClientWithFailures) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.config.OnUpdate != nil { + if err := c.config.OnUpdate(obj); err != nil { + return err + } + } + return c.Client.Update(ctx, obj, opts...) +} + +func (c *fakeClientWithFailures) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + if c.config.OnPatch != nil { + if err := c.config.OnPatch(obj); err != nil { + return err + } + } + return c.Client.Patch(ctx, obj, patch, opts...) +} + +func (c *fakeClientWithFailures) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if c.config.OnDelete != nil { + if err := c.config.OnDelete(obj); err != nil { + return err + } + } + return c.Client.Delete(ctx, obj, opts...) +} + +func (c *fakeClientWithFailures) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + if c.config.OnDeleteAllOf != nil { + if err := c.config.OnDeleteAllOf(obj); err != nil { + return err + } + } + return c.Client.DeleteAllOf(ctx, obj, opts...) +} + +func (c *fakeClientWithFailures) Status() client.StatusWriter { + return &statusWriterWithFailures{ + StatusWriter: c.Client.Status(), + config: c.config, + } +} + +type statusWriterWithFailures struct { + client.StatusWriter + config *FailureConfig +} + +func (s *statusWriterWithFailures) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + if s.config.OnStatusUpdate != nil { + if err := s.config.OnStatusUpdate(obj); err != nil { + return err + } + } + return s.StatusWriter.Update(ctx, obj, opts...) +} + +func (s *statusWriterWithFailures) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + if s.config.OnStatusPatch != nil { + if err := s.config.OnStatusPatch(obj); err != nil { + return err + } + } + return s.StatusWriter.Patch(ctx, obj, patch, opts...) 
+} + +// Helper functions for common failure scenarios + +// FailOnObjectName returns an error if the object name matches. +func FailOnObjectName(name string, err error) func(client.Object) error { + return func(obj client.Object) error { + accessor, metaErr := meta.Accessor(obj) + if metaErr != nil { + panic(fmt.Sprintf("meta.Accessor failed: %v", metaErr)) + } + if accessor.GetName() == name { + return err + } + return nil + } +} + +// FailOnKeyName returns an error if the key name matches. +func FailOnKeyName(name string, err error) func(client.ObjectKey) error { + return func(key client.ObjectKey) error { + if key.Name == name { + return err + } + return nil + } +} + +// FailOnNamespace returns an error if the namespace matches. +func FailOnNamespace(namespace string, err error) func(client.Object) error { + return func(obj client.Object) error { + accessor, metaErr := meta.Accessor(obj) + if metaErr != nil { + panic(fmt.Sprintf("meta.Accessor failed: %v", metaErr)) + } + if accessor.GetNamespace() == namespace { + return err + } + return nil + } +} + +// AlwaysFail returns the given error for all operations. +func AlwaysFail(err error) func(interface{}) error { + return func(interface{}) error { + return err + } +} + +// FailAfterNCalls returns an error after N successful calls. +func FailAfterNCalls(n int, err error) func() func(interface{}) error { + count := 0 + return func() func(interface{}) error { + return func(interface{}) error { + count++ + if count > n { + return err + } + return nil + } + } +} + +// Common errors for testing +var ( + ErrInjected = fmt.Errorf("injected test error") + ErrNetworkTimeout = fmt.Errorf("network timeout") + ErrPermissionError = fmt.Errorf("permission denied") +) diff --git a/pkg/resource-handler/controller/testutil/fake_client_test.go b/pkg/resource-handler/controller/testutil/fake_client_test.go new file mode 100644 index 00000000..718c0fdc --- /dev/null +++ b/pkg/resource-handler/controller/testutil/fake_client_test.go @@ -0,0 +1,602 @@ +package testutil + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestFakeClientWithFailures_Get(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + key client.ObjectKey + wantErr bool + }{ + "no failure - get succeeds": { + config: nil, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: false, + }, + "fail on specific name": { + config: &FailureConfig{ + OnGet: FailOnKeyName("test-pod", ErrInjected), + }, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: true, + }, + "no failure on different name": { + config: &FailureConfig{ + OnGet: FailOnKeyName("other-pod", ErrInjected), + }, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: false, + }, + "always fail": { + config: &FailureConfig{ + OnGet: func(key client.ObjectKey) error { + return ErrInjected + }, + }, + key: client.ObjectKey{ + Name: "test-pod", + Namespace: "default", + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). 
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + result := &corev1.Pod{} + err := fakeClient.Get(context.Background(), tc.key, result) + + if (err != nil) != tc.wantErr { + t.Errorf("Get() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Create(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + config *FailureConfig + obj *corev1.Pod + wantErr bool + }{ + "no failure - create succeeds": { + config: nil, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-pod", + Namespace: "default", + }, + }, + wantErr: false, + }, + "fail on specific object name": { + config: &FailureConfig{ + OnCreate: FailOnObjectName("new-pod", ErrPermissionError), + }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-pod", + Namespace: "default", + }, + }, + wantErr: true, + }, + "no failure on different object name": { + config: &FailureConfig{ + OnCreate: FailOnObjectName("other-pod", ErrPermissionError), + }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-pod", + Namespace: "default", + }, + }, + wantErr: false, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Create(context.Background(), tc.obj) + + if (err != nil) != tc.wantErr { + t.Errorf("Create() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Update(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - update succeeds": { + config: nil, + wantErr: false, + }, + "fail on update": { + config: &FailureConfig{ + OnUpdate: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Update(context.Background(), pod) + + if (err != nil) != tc.wantErr { + t.Errorf("Update() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Delete(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - delete succeeds": { + config: nil, + wantErr: false, + }, + "fail on delete": { + config: &FailureConfig{ + OnDelete: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + "fail on namespace": { + config: &FailureConfig{ + OnDelete: FailOnNamespace("default", ErrPermissionError), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod.DeepCopy()). 
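+				// Pass a copy so the shared fixture Pod is not mutated between cases.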
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Delete(context.Background(), pod) + + if (err != nil) != tc.wantErr { + t.Errorf("Delete() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_StatusUpdate(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - status update succeeds": { + config: nil, + wantErr: false, + }, + "fail on status update": { + config: &FailureConfig{ + OnStatusUpdate: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + WithStatusSubresource(&corev1.Pod{}). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.Status().Update(context.Background(), pod) + + if (err != nil) != tc.wantErr { + t.Errorf("Status().Update() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_List(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - list succeeds": { + config: nil, + wantErr: false, + }, + "fail on list": { + config: &FailureConfig{ + OnList: func(list client.ObjectList) error { + return ErrInjected + }, + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + podList := &corev1.PodList{} + err := fakeClient.List(context.Background(), podList) + + if (err != nil) != tc.wantErr { + t.Errorf("List() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_Patch(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - patch succeeds": { + config: nil, + wantErr: false, + }, + "fail on patch": { + config: &FailureConfig{ + OnPatch: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod.DeepCopy()). 
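+				// Keep a separate tracker copy; the original pod is reused below as the Patch target.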
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + patch := client.MergeFrom(pod.DeepCopy()) + err := fakeClient.Patch(context.Background(), pod, patch) + + if (err != nil) != tc.wantErr { + t.Errorf("Patch() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_DeleteAllOf(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - deleteAllOf succeeds": { + config: nil, + wantErr: false, + }, + "fail on deleteAllOf": { + config: &FailureConfig{ + OnDeleteAllOf: func(obj client.Object) error { + return ErrInjected + }, + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + err := fakeClient.DeleteAllOf(context.Background(), &corev1.Pod{}, client.InNamespace("default")) + + if (err != nil) != tc.wantErr { + t.Errorf("DeleteAllOf() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestFakeClientWithFailures_StatusPatch(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + tests := map[string]struct { + config *FailureConfig + wantErr bool + }{ + "no failure - status patch succeeds": { + config: nil, + wantErr: false, + }, + "fail on status patch": { + config: &FailureConfig{ + OnStatusPatch: FailOnObjectName("test-pod", ErrInjected), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod.DeepCopy()). + WithStatusSubresource(&corev1.Pod{}). 
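+				// Registering the status subresource is required for Status().Patch() to succeed.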
+ Build() + + fakeClient := NewFakeClientWithFailures(baseClient, tc.config) + + patch := client.MergeFrom(pod.DeepCopy()) + err := fakeClient.Status().Patch(context.Background(), pod, patch) + + if (err != nil) != tc.wantErr { + t.Errorf("Status().Patch() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func TestHelperFunctions(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + } + + t.Run("FailOnObjectName - matching name", func(t *testing.T) { + fn := FailOnObjectName("test-pod", ErrInjected) + err := fn(pod) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnObjectName - different name", func(t *testing.T) { + fn := FailOnObjectName("other-pod", ErrInjected) + err := fn(pod) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailOnKeyName - matching name", func(t *testing.T) { + fn := FailOnKeyName("test-pod", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnKeyName - different name", func(t *testing.T) { + fn := FailOnKeyName("other-pod", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailOnNamespace - matching namespace", func(t *testing.T) { + fn := FailOnNamespace("default", ErrInjected) + err := fn(pod) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnNamespace - different namespace", func(t *testing.T) { + fn := FailOnNamespace("other-ns", ErrInjected) + err := fn(pod) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailAfterNCalls", func(t *testing.T) { + fn := FailAfterNCalls(2, ErrInjected)() + + // First call - should succeed + if err := fn(nil); err != nil { + t.Errorf("Call 1: expected no error, got %v", err) + } + + // Second call - should succeed + if err := fn(nil); err != nil { + t.Errorf("Call 2: expected no error, got %v", err) + } + + // Third call - should fail + if err := fn(nil); err != ErrInjected { + t.Errorf("Call 3: expected ErrInjected, got %v", err) + } + + // Fourth call - should fail + if err := fn(nil); err != ErrInjected { + t.Errorf("Call 4: expected ErrInjected, got %v", err) + } + }) + + t.Run("AlwaysFail with object", func(t *testing.T) { + fn := AlwaysFail(ErrInjected) + err := fn(pod) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("AlwaysFail with key", func(t *testing.T) { + fn := AlwaysFail(ErrNetworkTimeout) + err := fn(client.ObjectKey{Name: "test", Namespace: "default"}) + if err != ErrNetworkTimeout { + t.Errorf("Expected ErrNetworkTimeout, got %v", err) + } + }) +} + +func TestHelperFunctions_Panic(t *testing.T) { + t.Run("FailOnObjectName - panics on nil object", func(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic when meta.Accessor fails on nil") + } + }() + + fn := FailOnObjectName("test", ErrInjected) + _ = fn(nil) // Should panic + }) + + t.Run("FailOnNamespace - panics on nil object", func(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic when meta.Accessor fails on nil") + } + }() + + fn := FailOnNamespace("default", ErrInjected) + _ = fn(nil) // Should panic + }) +} From 
1eccb32c8cd3ef615f62319e531d0683250d5912 Mon Sep 17 00:00:00 2001 From: Ryota Date: Mon, 13 Oct 2025 02:23:25 +0100 Subject: [PATCH 13/28] Make all tests parallel --- .../controller/testutil/fake_client_test.go | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/pkg/resource-handler/controller/testutil/fake_client_test.go b/pkg/resource-handler/controller/testutil/fake_client_test.go index 718c0fdc..7ce399a3 100644 --- a/pkg/resource-handler/controller/testutil/fake_client_test.go +++ b/pkg/resource-handler/controller/testutil/fake_client_test.go @@ -12,6 +12,8 @@ import ( ) func TestFakeClientWithFailures_Get(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -71,6 +73,8 @@ func TestFakeClientWithFailures_Get(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod). @@ -89,6 +93,8 @@ func TestFakeClientWithFailures_Get(t *testing.T) { } func TestFakeClientWithFailures_Create(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -135,6 +141,8 @@ func TestFakeClientWithFailures_Create(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). Build() @@ -151,6 +159,8 @@ func TestFakeClientWithFailures_Create(t *testing.T) { } func TestFakeClientWithFailures_Update(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -179,6 +189,8 @@ func TestFakeClientWithFailures_Update(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod). @@ -196,6 +208,8 @@ func TestFakeClientWithFailures_Update(t *testing.T) { } func TestFakeClientWithFailures_Delete(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -230,6 +244,8 @@ func TestFakeClientWithFailures_Delete(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod.DeepCopy()). @@ -247,6 +263,8 @@ func TestFakeClientWithFailures_Delete(t *testing.T) { } func TestFakeClientWithFailures_StatusUpdate(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -275,6 +293,8 @@ func TestFakeClientWithFailures_StatusUpdate(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod). @@ -293,6 +313,8 @@ func TestFakeClientWithFailures_StatusUpdate(t *testing.T) { } func TestFakeClientWithFailures_List(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -323,6 +345,8 @@ func TestFakeClientWithFailures_List(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod). 
@@ -341,6 +365,8 @@ func TestFakeClientWithFailures_List(t *testing.T) { } func TestFakeClientWithFailures_Patch(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -369,6 +395,8 @@ func TestFakeClientWithFailures_Patch(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod.DeepCopy()). @@ -387,6 +415,8 @@ func TestFakeClientWithFailures_Patch(t *testing.T) { } func TestFakeClientWithFailures_DeleteAllOf(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -417,6 +447,8 @@ func TestFakeClientWithFailures_DeleteAllOf(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod). @@ -434,6 +466,8 @@ func TestFakeClientWithFailures_DeleteAllOf(t *testing.T) { } func TestFakeClientWithFailures_StatusPatch(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = corev1.AddToScheme(scheme) @@ -462,6 +496,8 @@ func TestFakeClientWithFailures_StatusPatch(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { + t.Parallel() + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(pod.DeepCopy()). @@ -481,6 +517,8 @@ func TestFakeClientWithFailures_StatusPatch(t *testing.T) { } func TestHelperFunctions(t *testing.T) { + t.Parallel() + pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pod", @@ -489,6 +527,8 @@ func TestHelperFunctions(t *testing.T) { } t.Run("FailOnObjectName - matching name", func(t *testing.T) { + t.Parallel() + fn := FailOnObjectName("test-pod", ErrInjected) err := fn(pod) if err != ErrInjected { @@ -497,6 +537,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("FailOnObjectName - different name", func(t *testing.T) { + t.Parallel() + fn := FailOnObjectName("other-pod", ErrInjected) err := fn(pod) if err != nil { @@ -505,6 +547,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("FailOnKeyName - matching name", func(t *testing.T) { + t.Parallel() + fn := FailOnKeyName("test-pod", ErrInjected) err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) if err != ErrInjected { @@ -513,6 +557,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("FailOnKeyName - different name", func(t *testing.T) { + t.Parallel() + fn := FailOnKeyName("other-pod", ErrInjected) err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) if err != nil { @@ -521,6 +567,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("FailOnNamespace - matching namespace", func(t *testing.T) { + t.Parallel() + fn := FailOnNamespace("default", ErrInjected) err := fn(pod) if err != ErrInjected { @@ -529,6 +577,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("FailOnNamespace - different namespace", func(t *testing.T) { + t.Parallel() + fn := FailOnNamespace("other-ns", ErrInjected) err := fn(pod) if err != nil { @@ -537,6 +587,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("FailAfterNCalls", func(t *testing.T) { + t.Parallel() + fn := FailAfterNCalls(2, ErrInjected)() // First call - should succeed @@ -561,6 +613,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("AlwaysFail with object", func(t *testing.T) { + t.Parallel() + fn := AlwaysFail(ErrInjected) err := fn(pod) if err != ErrInjected { @@ -569,6 +623,8 @@ func TestHelperFunctions(t *testing.T) { }) t.Run("AlwaysFail with key", func(t *testing.T) 
{ + t.Parallel() + fn := AlwaysFail(ErrNetworkTimeout) err := fn(client.ObjectKey{Name: "test", Namespace: "default"}) if err != ErrNetworkTimeout { @@ -578,7 +634,11 @@ func TestHelperFunctions(t *testing.T) { } func TestHelperFunctions_Panic(t *testing.T) { + t.Parallel() + t.Run("FailOnObjectName - panics on nil object", func(t *testing.T) { + t.Parallel() + defer func() { if r := recover(); r == nil { t.Errorf("Expected panic when meta.Accessor fails on nil") @@ -590,6 +650,8 @@ func TestHelperFunctions_Panic(t *testing.T) { }) t.Run("FailOnNamespace - panics on nil object", func(t *testing.T) { + t.Parallel() + defer func() { if r := recover(); r == nil { t.Errorf("Expected panic when meta.Accessor fails on nil") From c8c3aac4af1d5afc19440a7abf48bf3fb51dec41 Mon Sep 17 00:00:00 2001 From: Ryota Date: Mon, 13 Oct 2025 02:51:13 +0100 Subject: [PATCH 14/28] Only add working tests with failure injection --- .../controller/etcd/etcd_controller_test.go | 230 +++++++++--------- 1 file changed, 113 insertions(+), 117 deletions(-) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 25ab78ee..0d9cdb93 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -15,9 +15,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" ) func TestEtcdReconciler_Reconcile(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() _ = multigresv1alpha1.AddToScheme(scheme) _ = appsv1.AddToScheme(scheme) @@ -26,6 +29,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { tests := map[string]struct { etcd *multigresv1alpha1.Etcd existingObjects []client.Object + failureConfig *testutil.FailureConfig wantStatefulSet bool wantHeadlessService bool wantClientService bool @@ -101,17 +105,124 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantClientService: true, wantFinalizer: true, }, + "error on StatefulSet create": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if _, ok := obj.(*appsv1.StatefulSet); ok { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on headless Service create": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd-headless" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on client Service create": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, 
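+		// The remaining injection cases exercise later stages of Reconcile:
+		// the status update, the initial Get, and the finalizer Update.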
+ "error on status update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnStatusUpdate: testutil.FailOnObjectName("test-etcd", testutil.ErrInjected), + }, + wantErr: true, + }, + "error on Get Etcd": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "error on finalizer Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-etcd", testutil.ErrInjected), + }, + wantErr: true, + }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - // Create fake client with existing objects - fakeClient := fake.NewClientBuilder(). + t.Parallel() + + // Create base fake client + baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(tc.existingObjects...). WithStatusSubresource(&multigresv1alpha1.Etcd{}). Build() + fakeClient := client.Client(baseClient) + // Wrap with failure injection if configured + if tc.failureConfig != nil { + fakeClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) + } + reconciler := &EtcdReconciler{ Client: fakeClient, Scheme: scheme, @@ -145,7 +256,6 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) return } - if tc.wantErr { return } @@ -203,117 +313,3 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }) } } - -func TestEtcdReconciler_HandleDeletion(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - - now := metav1.Now() - - tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - wantFinalizerRemoved bool - }{ - "remove finalizer on deletion": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - DeletionTimestamp: &now, - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - wantFinalizerRemoved: true, - }, - "no finalizer to remove": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - DeletionTimestamp: &now, - Finalizers: []string{}, - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - wantFinalizerRemoved: false, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(tc.etcd). - WithStatusSubresource(&multigresv1alpha1.Etcd{}). 
- Build() - - reconciler := &EtcdReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, - } - - _, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Fatalf("Reconcile() unexpected error = %v", err) - } - - // Verify finalizer state - etcd := &multigresv1alpha1.Etcd{} - err = fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, etcd) - if err != nil { - t.Fatalf("Failed to get Etcd: %v", err) - } - - hasFinalizer := slices.Contains(etcd.Finalizers, finalizerName) - if tc.wantFinalizerRemoved && hasFinalizer { - t.Errorf("Expected finalizer to be removed, but it's still present") - } - if !tc.wantFinalizerRemoved && len(tc.etcd.Finalizers) > 0 && !hasFinalizer && slices.Contains(etcd.Finalizers, finalizerName) { - t.Errorf("Expected finalizer to be present, but it's removed") - } - }) - } -} - -func TestEtcdReconciler_ReconcileNotFound(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - Build() - - reconciler := &EtcdReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "nonexistent-etcd", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(context.Background(), req) - if err != nil { - t.Errorf("Reconcile() should not error on NotFound, got: %v", err) - } - - if result != (ctrl.Result{}) { - t.Errorf("Reconcile() should return empty Result on NotFound, got: %v", result) - } -} From c4aa5eefb34eb9aacf2bdd3e08fd4736f76e8e7a Mon Sep 17 00:00:00 2001 From: Ryota Date: Mon, 13 Oct 2025 03:08:31 +0100 Subject: [PATCH 15/28] Clean up object verification --- .../controller/etcd/etcd_controller_test.go | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 0d9cdb93..837b13e6 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -27,14 +27,13 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { _ = corev1.AddToScheme(scheme) tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - existingObjects []client.Object - failureConfig *testutil.FailureConfig - wantStatefulSet bool - wantHeadlessService bool - wantClientService bool - wantFinalizer bool - wantErr bool + etcd *multigresv1alpha1.Etcd + existingObjects []client.Object + failureConfig *testutil.FailureConfig + // TODO: If wantErr is false but failureConfig is set, assertions may fail + // due to failure injection. This should be addressed when we need to test + // partial failures that don't prevent reconciliation success. 
+ wantErr bool }{ "create all resources for new Etcd": { etcd: &multigresv1alpha1.Etcd{ @@ -45,10 +44,6 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { Spec: multigresv1alpha1.EtcdSpec{}, }, existingObjects: []client.Object{}, - wantStatefulSet: true, - wantHeadlessService: true, - wantClientService: true, - wantFinalizer: true, }, "update existing resources": { etcd: &multigresv1alpha1.Etcd{ @@ -84,10 +79,6 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, }, }, - wantStatefulSet: true, - wantHeadlessService: true, - wantClientService: true, - wantFinalizer: true, }, "etcd with cellName": { etcd: &multigresv1alpha1.Etcd{ @@ -100,10 +91,6 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, }, existingObjects: []client.Object{}, - wantStatefulSet: true, - wantHeadlessService: true, - wantClientService: true, - wantFinalizer: true, }, "error on StatefulSet create": { etcd: &multigresv1alpha1.Etcd{ @@ -260,56 +247,69 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { return } - // Verify StatefulSet was created - if tc.wantStatefulSet { - sts := &appsv1.StatefulSet{} - err := fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, sts) - if err != nil { - t.Errorf("Expected StatefulSet to exist, got error: %v", err) - } + // For success cases, verify all resources were created with correct labels + expectedCellName := tc.etcd.Spec.CellName + if expectedCellName == "" { + expectedCellName = "multigres-global-topo" } - // Verify headless Service was created - if tc.wantHeadlessService { - svc := &corev1.Service{} - err := fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name + "-headless", - Namespace: tc.etcd.Namespace, - }, svc) - if err != nil { - t.Errorf("Expected headless Service to exist, got error: %v", err) + // Verify StatefulSet + sts := &appsv1.StatefulSet{} + err = fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, sts) + if err != nil { + t.Errorf("StatefulSet should exist, got error: %v", err) + } else { + if sts.Labels["multigres.com/cell"] != expectedCellName { + t.Errorf("StatefulSet cell label = %v, want %v", sts.Labels["multigres.com/cell"], expectedCellName) + } + if sts.Labels["app.kubernetes.io/component"] != "etcd" { + t.Errorf("StatefulSet component label = %v, want etcd", sts.Labels["app.kubernetes.io/component"]) } } - // Verify client Service was created - if tc.wantClientService { - svc := &corev1.Service{} - err := fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, svc) - if err != nil { - t.Errorf("Expected client Service to exist, got error: %v", err) + // Verify headless Service + headlessSvc := &corev1.Service{} + err = fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name + "-headless", + Namespace: tc.etcd.Namespace, + }, headlessSvc) + if err != nil { + t.Errorf("Headless Service should exist, got error: %v", err) + } else { + if headlessSvc.Labels["multigres.com/cell"] != expectedCellName { + t.Errorf("Headless Service cell label = %v, want %v", headlessSvc.Labels["multigres.com/cell"], expectedCellName) } } - // Verify finalizer was added - if tc.wantFinalizer { - etcd := &multigresv1alpha1.Etcd{} - err := fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, etcd) - if err != nil { - t.Fatalf("Failed to get Etcd: %v", err) - } - if 
!slices.Contains(etcd.Finalizers, finalizerName) { - t.Errorf("Expected finalizer %s to be present", finalizerName) + // Verify client Service + clientSvc := &corev1.Service{} + err = fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, clientSvc) + if err != nil { + t.Errorf("Client Service should exist, got error: %v", err) + } else { + if clientSvc.Labels["multigres.com/cell"] != expectedCellName { + t.Errorf("Client Service cell label = %v, want %v", clientSvc.Labels["multigres.com/cell"], expectedCellName) } } + + // Verify finalizer + etcd := &multigresv1alpha1.Etcd{} + err = fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tc.etcd.Name, + Namespace: tc.etcd.Namespace, + }, etcd) + if err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + if !slices.Contains(etcd.Finalizers, finalizerName) { + t.Errorf("Finalizer %s should be present", finalizerName) + } }) } } From 4381ba083715d190b55e19b66d6a226ee7f51272 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:18:01 +0100 Subject: [PATCH 16/28] Add requeue check --- .../controller/etcd/etcd_controller_test.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 837b13e6..049e80af 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -33,7 +33,8 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { // TODO: If wantErr is false but failureConfig is set, assertions may fail // due to failure injection. This should be addressed when we need to test // partial failures that don't prevent reconciliation success. 
- wantErr bool + wantErr bool + wantRequeue bool }{ "create all resources for new Etcd": { etcd: &multigresv1alpha1.Etcd{ @@ -43,7 +44,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, Spec: multigresv1alpha1.EtcdSpec{}, }, - existingObjects: []client.Object{}, + existingObjects: []client.Object{}, }, "update existing resources": { etcd: &multigresv1alpha1.Etcd{ @@ -90,7 +91,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { CellName: "zone1", }, }, - existingObjects: []client.Object{}, + existingObjects: []client.Object{}, }, "error on StatefulSet create": { etcd: &multigresv1alpha1.Etcd{ @@ -238,7 +239,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, } - _, err := reconciler.Reconcile(context.Background(), req) + result, err := reconciler.Reconcile(context.Background(), req) if (err != nil) != tc.wantErr { t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) return @@ -247,6 +248,11 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { return } + // Check requeue + if result.Requeue != tc.wantRequeue { + t.Errorf("Reconcile() result.Requeue = %v, want %v", result.Requeue, tc.wantRequeue) + } + // For success cases, verify all resources were created with correct labels expectedCellName := tc.etcd.Spec.CellName if expectedCellName == "" { From 9355f3dbaea3763845910dd8ca306b816fb485a2 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:19:01 +0100 Subject: [PATCH 17/28] Add namespace + name key test utility --- .../controller/testutil/fake_client.go | 10 +++++++ .../controller/testutil/fake_client_test.go | 30 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/pkg/resource-handler/controller/testutil/fake_client.go b/pkg/resource-handler/controller/testutil/fake_client.go index f0582739..74cebbc3 100644 --- a/pkg/resource-handler/controller/testutil/fake_client.go +++ b/pkg/resource-handler/controller/testutil/fake_client.go @@ -177,6 +177,16 @@ func FailOnKeyName(name string, err error) func(client.ObjectKey) error { } } +// FailOnNamespacedKeyName returns an error if both the key name and namespace match. +func FailOnNamespacedKeyName(name, namespace string, err error) func(client.ObjectKey) error { + return func(key client.ObjectKey) error { + if key.Name == name && key.Namespace == namespace { + return err + } + return nil + } +} + // FailOnNamespace returns an error if the namespace matches. 
func FailOnNamespace(namespace string, err error) func(client.Object) error { return func(obj client.Object) error { diff --git a/pkg/resource-handler/controller/testutil/fake_client_test.go b/pkg/resource-handler/controller/testutil/fake_client_test.go index 7ce399a3..5020902e 100644 --- a/pkg/resource-handler/controller/testutil/fake_client_test.go +++ b/pkg/resource-handler/controller/testutil/fake_client_test.go @@ -566,6 +566,36 @@ func TestHelperFunctions(t *testing.T) { } }) + t.Run("FailOnNamespacedKeyName - matching name and namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) + if err != ErrInjected { + t.Errorf("Expected ErrInjected, got %v", err) + } + }) + + t.Run("FailOnNamespacedKeyName - matching name but different namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) + err := fn(client.ObjectKey{Name: "test-pod", Namespace: "kube-system"}) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("FailOnNamespacedKeyName - different name but matching namespace", func(t *testing.T) { + t.Parallel() + + fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) + err := fn(client.ObjectKey{Name: "other-pod", Namespace: "default"}) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + t.Run("FailOnNamespace - matching namespace", func(t *testing.T) { t.Parallel() From 2fa5d1619802c422e5baae61cab9567562d86322 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:19:31 +0100 Subject: [PATCH 18/28] Introduce concrete type after N call failure util --- .../controller/testutil/fake_client.go | 45 ++++++++++++++----- .../controller/testutil/fake_client_test.go | 44 ++++++++++++++---- 2 files changed, 71 insertions(+), 18 deletions(-) diff --git a/pkg/resource-handler/controller/testutil/fake_client.go b/pkg/resource-handler/controller/testutil/fake_client.go index 74cebbc3..ec5a76f4 100644 --- a/pkg/resource-handler/controller/testutil/fake_client.go +++ b/pkg/resource-handler/controller/testutil/fake_client.go @@ -202,23 +202,48 @@ func FailOnNamespace(namespace string, err error) func(client.Object) error { } // AlwaysFail returns the given error for all operations. -func AlwaysFail(err error) func(interface{}) error { +func AlwaysFail(err error) func(any) error { return func(interface{}) error { return err } } -// FailAfterNCalls returns an error after N successful calls. -func FailAfterNCalls(n int, err error) func() func(interface{}) error { +// FailKeyAfterNCalls returns an ObjectKey failure function that fails after N successful calls. +// Use for OnGet. +func FailKeyAfterNCalls(n int, err error) func(client.ObjectKey) error { count := 0 - return func() func(interface{}) error { - return func(interface{}) error { - count++ - if count > n { - return err - } - return nil + return func(client.ObjectKey) error { + count++ + if count > n { + return err + } + return nil + } +} + +// FailObjAfterNCalls returns an Object failure function that fails after N successful calls. +// Use for OnCreate, OnUpdate, OnDelete, OnPatch, OnDeleteAllOf, OnStatusUpdate, OnStatusPatch. 
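+//
+// For example, FailObjAfterNCalls(1, ErrInjected) lets the first write through
+// and fails every call after it.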
+func FailObjAfterNCalls(n int, err error) func(client.Object) error { + count := 0 + return func(client.Object) error { + count++ + if count > n { + return err } + return nil + } +} + +// FailObjListAfterNCalls returns an ObjectList failure function that fails after N successful calls. +// Use for OnList. +func FailObjListAfterNCalls(n int, err error) func(client.ObjectList) error { + count := 0 + return func(client.ObjectList) error { + count++ + if count > n { + return err + } + return nil } } diff --git a/pkg/resource-handler/controller/testutil/fake_client_test.go b/pkg/resource-handler/controller/testutil/fake_client_test.go index 5020902e..97867777 100644 --- a/pkg/resource-handler/controller/testutil/fake_client_test.go +++ b/pkg/resource-handler/controller/testutil/fake_client_test.go @@ -616,29 +616,57 @@ func TestHelperFunctions(t *testing.T) { } }) - t.Run("FailAfterNCalls", func(t *testing.T) { + t.Run("FailKeyAfterNCalls", func(t *testing.T) { t.Parallel() - fn := FailAfterNCalls(2, ErrInjected)() + fn := FailKeyAfterNCalls(2, ErrInjected) // First call - should succeed - if err := fn(nil); err != nil { + if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != nil { t.Errorf("Call 1: expected no error, got %v", err) } // Second call - should succeed - if err := fn(nil); err != nil { + if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != nil { t.Errorf("Call 2: expected no error, got %v", err) } // Third call - should fail - if err := fn(nil); err != ErrInjected { + if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != ErrInjected { t.Errorf("Call 3: expected ErrInjected, got %v", err) } + }) + + t.Run("FailObjAfterNCalls", func(t *testing.T) { + t.Parallel() + + fn := FailObjAfterNCalls(1, ErrPermissionError) + + // First call - should succeed + if err := fn(pod); err != nil { + t.Errorf("Call 1: expected no error, got %v", err) + } + + // Second call - should fail + if err := fn(pod); err != ErrPermissionError { + t.Errorf("Call 2: expected ErrPermissionError, got %v", err) + } + }) + + t.Run("FailObjListAfterNCalls", func(t *testing.T) { + t.Parallel() + + fn := FailObjListAfterNCalls(1, ErrNetworkTimeout) + podList := &corev1.PodList{} + + // First call - should succeed + if err := fn(podList); err != nil { + t.Errorf("Call 1: expected no error, got %v", err) + } - // Fourth call - should fail - if err := fn(nil); err != ErrInjected { - t.Errorf("Call 4: expected ErrInjected, got %v", err) + // Second call - should fail + if err := fn(podList); err != ErrNetworkTimeout { + t.Errorf("Call 2: expected ErrNetworkTimeout, got %v", err) } }) From d856c229e0be26aaaabe677d4214afdd590eaa27 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:20:33 +0100 Subject: [PATCH 19/28] Add more test cases for reconciliation --- .../controller/etcd/etcd_controller_test.go | 271 ++++++++++++++++++ 1 file changed, 271 insertions(+) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 049e80af..91c55756 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -192,6 +192,277 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, wantErr: true, }, + "error on StatefulSet Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: 
multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(5), + }, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: int32Ptr(3), + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if _, ok := obj.(*appsv1.StatefulSet); ok { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on headless Service Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-headless", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd-headless" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on client Service Update": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-headless", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get StatefulSet in updateStatus": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-status", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-status", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + // Fail StatefulSet Get after first successful call + // First Get succeeds (in reconcileStatefulSet) + // Second Get fails (in updateStatus) + OnGet: testutil.FailKeyAfterNCalls(1, testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "deletion with finalizer": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + }, + "all replicas ready status": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-ready", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(3), + }, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-ready", + Namespace: "default", + }, + Spec: 
appsv1.StatefulSetSpec{ + Replicas: int32Ptr(3), + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + }, + }, + }, + "error on Get StatefulSet (not NotFound)": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + // Fail StatefulSet Get with non-NotFound error + if key.Name == "test-etcd" { + return testutil.ErrNetworkTimeout + } + return nil + }, + }, + wantErr: true, + }, + "error on Get headless Service (not NotFound)": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + // Fail headless Service Get with non-NotFound error + if key.Name == "test-etcd-headless" { + return testutil.ErrNetworkTimeout + } + return nil + }, + }, + wantErr: true, + }, + "error on Get client Service (not NotFound)": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-svc", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-svc", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-svc-headless", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnNamespacedKeyName("test-etcd-svc", "default", testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "deletion error on finalizer removal": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-etcd-del", testutil.ErrInjected), + }, + wantErr: true, + }, } for name, tc := range tests { From c11dc73fac6a6f13cb67c24e04e2d4f2d4c222de Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:21:10 +0100 Subject: [PATCH 20/28] Add NotFound reconcile testing --- .../controller/etcd/etcd_controller_test.go | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 91c55756..c5fedd19 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -590,3 +590,35 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }) } } + +func TestEtcdReconciler_ReconcileNotFound(t *testing.T) { + 
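+	t.Parallel()
+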
scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Reconcile non-existent resource + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "nonexistent-etcd", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + if err != nil { + t.Errorf("Reconcile() should not error on NotFound, got: %v", err) + } + if result.Requeue { + t.Errorf("Reconcile() should not requeue on NotFound") + } +} From 7ab7fd0f5b4259da698bc257cfaf72095b4c4e23 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:21:31 +0100 Subject: [PATCH 21/28] Add storage testing --- .../controller/etcd/statefulset_test.go | 217 ++++++++++++++++++ 1 file changed, 217 insertions(+) diff --git a/pkg/resource-handler/controller/etcd/statefulset_test.go b/pkg/resource-handler/controller/etcd/statefulset_test.go index 1cd6a8e2..8e4d3a01 100644 --- a/pkg/resource-handler/controller/etcd/statefulset_test.go +++ b/pkg/resource-handler/controller/etcd/statefulset_test.go @@ -21,6 +21,10 @@ func boolPtr(b bool) *bool { return &b } +func stringPtr(s string) *string { + return &s +} + func TestBuildStatefulSet(t *testing.T) { scheme := runtime.NewScheme() _ = multigresv1alpha1.AddToScheme(scheme) @@ -232,6 +236,219 @@ func TestBuildStatefulSet(t *testing.T) { }, }, }, + "custom storage size": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + StorageSize: "20Gi", + }, + }, + scheme: scheme, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-etcd-headless", + Replicas: int32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "etcd", + Image: DefaultImage, + 
Resources: corev1.ResourceRequirements{}, + Env: buildEtcdEnv("test-etcd", "default", 3, "test-etcd-headless"), + Ports: buildContainerPorts(nil), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("20Gi"), + }, + }, + }, + }, + }, + }, + }, + }, + "custom VolumeClaimTemplate": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.EtcdSpec{ + VolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("50Gi"), + }, + }, + StorageClassName: stringPtr("fast-ssd"), + }, + }, + }, + scheme: scheme, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Etcd", + Name: "test-etcd", + UID: "test-uid", + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-etcd-headless", + Replicas: int32Ptr(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-etcd", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "multigres-global-topo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "etcd", + Image: DefaultImage, + Resources: corev1.ResourceRequirements{}, + Env: buildEtcdEnv("test-etcd", "default", 3, "test-etcd-headless"), + Ports: buildContainerPorts(nil), + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + Resources: 
corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("50Gi"), + }, + }, + StorageClassName: stringPtr("fast-ssd"), + }, + }, + }, + }, + }, + }, "scheme without Etcd type - should error": { etcd: &multigresv1alpha1.Etcd{ ObjectMeta: metav1.ObjectMeta{ Name: "test-etcd", Namespace: "default", From bdbd6b13d188bf257c87aacc7b96b28d081197fc Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:21:44 +0100 Subject: [PATCH 22/28] Add internal testing for defensive coverage --- .../etcd/etcd_controller_internal_test.go | 235 ++++++++++++++++++ 1 file changed, 235 insertions(+) create mode 100644 pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go new file mode 100644 index 00000000..ca7f04c0 --- /dev/null +++ b/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go @@ -0,0 +1,235 @@ +package etcd + +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" +) + +// TestReconcileStatefulSet_InvalidScheme tests the error path when BuildStatefulSet fails. +// This should never happen in production, where the scheme is set up correctly in main.go. +// The test exists to cover this defensive error-handling path. +func TestReconcileStatefulSet_InvalidScheme(t *testing.T) { + // Empty scheme without Etcd type registered + invalidScheme := runtime.NewScheme() + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileStatefulSet(context.Background(), etcd) + if err == nil { + t.Error("reconcileStatefulSet() should error with invalid scheme") + } +} + +// TestReconcileHeadlessService_InvalidScheme tests the error path when BuildHeadlessService fails. +func TestReconcileHeadlessService_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileHeadlessService(context.Background(), etcd) + if err == nil { + t.Error("reconcileHeadlessService() should error with invalid scheme") + } +} + +// TestReconcileClientService_InvalidScheme tests the error path when BuildClientService fails. +func TestReconcileClientService_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme).
+ Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileClientService(context.Background(), etcd) + if err == nil { + t.Error("reconcileClientService() should error with invalid scheme") + } +} + +// TestUpdateStatus_StatefulSetNotFound tests the NotFound path in updateStatus. +func TestUpdateStatus_StatefulSetNotFound(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) // Need the StatefulSet type registered for Get to work + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd). + WithStatusSubresource(&multigresv1alpha1.Etcd{}). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Call updateStatus when the StatefulSet doesn't exist yet + err := reconciler.updateStatus(context.Background(), etcd) + if err != nil { + t.Errorf("updateStatus() should not error when StatefulSet not found, got: %v", err) + } +} + +// TestHandleDeletion_NoFinalizer tests the early return when no finalizer is present. +func TestHandleDeletion_NoFinalizer(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + Finalizers: []string{}, // No finalizer + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd). + Build() + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + result, err := reconciler.handleDeletion(context.Background(), etcd) + if err != nil { + t.Errorf("handleDeletion() should not error when no finalizer, got: %v", err) + } + if result.Requeue { + t.Error("handleDeletion() should not requeue when no finalizer") + } +} + +// TestReconcileClientService_GetError tests the error path when Get on the client Service fails with an error other than NotFound. +func TestReconcileClientService_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + // Create a client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + }) + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcileClientService(context.Background(), etcd) + if err == nil { + t.Error("reconcileClientService() should error on Get failure") + } +} + +// TestUpdateStatus_GetError tests the error path when Get on the StatefulSet fails with an error other than NotFound. +func TestUpdateStatus_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + + etcd := &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd", + Namespace: "default", + }, + Spec: multigresv1alpha1.EtcdSpec{}, + } + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(etcd).
+ WithStatusSubresource(&multigresv1alpha1.Etcd{}). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + }) + + reconciler := &EtcdReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.updateStatus(context.Background(), etcd) + if err == nil { + t.Error("updateStatus() should error on Get failure") + } +} From 6e22f2b31d7368138704cb3edb00685770b88a9f Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:23:30 +0100 Subject: [PATCH 23/28] Add TODO comment about missing test coverage --- pkg/resource-handler/controller/etcd/etcd_controller.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller.go b/pkg/resource-handler/controller/etcd/etcd_controller.go index 826a0103..bd49c0ba 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller.go @@ -257,6 +257,7 @@ func (r *EtcdReconciler) buildConditions(etcd *multigresv1alpha1.Etcd, sts *apps } // SetupWithManager sets up the controller with the Manager. +// TODO: This is missing test coverage and will need an envtest-based setup. func (r *EtcdReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&multigresv1alpha1.Etcd{}). From a4b0f2259c5d6b5d02ab01b7f7cfb27f31064db7 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:37:27 +0100 Subject: [PATCH 24/28] Remove unused file --- .../bases/multigres.multigres.com_etcds.yaml | 1607 ----------------- 1 file changed, 1607 deletions(-) delete mode 100644 config/crd/bases/multigres.multigres.com_etcds.yaml diff --git a/config/crd/bases/multigres.multigres.com_etcds.yaml b/config/crd/bases/multigres.multigres.com_etcds.yaml deleted file mode 100644 index 0668a6fd..00000000 --- a/config/crd/bases/multigres.multigres.com_etcds.yaml +++ /dev/null @@ -1,1607 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.18.0 - name: etcds.multigres.multigres.com -spec: - group: multigres.multigres.com - names: - kind: Etcd - listKind: EtcdList - plural: etcds - singular: etcd - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.ready - name: Ready - type: boolean - - jsonPath: .status.replicas - name: DesiredReplicas - type: string - - jsonPath: .status.readyReplicas - name: ReadyReplicas - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: Etcd is the Schema for the etcds API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec defines the desired state of Etcd - properties: - affinity: - description: Affinity defines pod affinity and anti-affinity rules. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-type: atomic - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. 
The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. 
- avoid putting this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and subtracting - "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. 
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. 
- format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. 
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - type: object - cellName: - description: CellName is the name of the cell this Etcd belongs to. - minLength: 1 - type: string - image: - default: gcr.io/etcd-development/etcd:v3.5.9 - description: |- - Image is the container image for Etcd. 
- NOTE: The version information is taken from Multigres repo's local - provisioning setup: - https://github.com/multigres/multigres/blob/38264ed3cb5049961a1e3d8a9de4836f8215ca76/go/provisioner/local/config.go#L186 - minLength: 1 - type: string - imagePullSecrets: - description: |- - ImagePullSecrets is an optional list of references to secrets in the same namespace - to use for pulling the image. - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - nodeSelector: - additionalProperties: - type: string - description: NodeSelector is a selector which must be true for the - pod to fit on a node. - type: object - podAnnotations: - additionalProperties: - type: string - description: PodAnnotations are annotations to add to the Etcd pods. - type: object - podLabels: - additionalProperties: - type: string - description: |- - PodLabels are additional labels to add to the Etcd pods. - These are merged with the standard labels generated by the operator. - In case of a key conflict, the operator's standard labels take precedence. - type: object - replicas: - default: 3 - description: |- - Replicas is the desired number of Etcd members. - For high availability, use an odd number (typically 3 or 5). - format: int32 - minimum: 0 - type: integer - resources: - description: Resources defines the resource requirements for the Etcd - container. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This field depends on the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. 
- If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - serviceAccountName: - description: ServiceAccountName is the name of the ServiceAccount - to use for the Etcd pods. - type: string - storageClassName: - description: |- - StorageClassName is the name of the StorageClass to use for Etcd data volumes. - If not specified, the default StorageClass will be used. - type: string - storageSize: - default: 10Gi - description: StorageSize is the size of the persistent volume for - each Etcd member. - type: string - tolerations: - description: Tolerations allows pods to schedule onto nodes with matching - taints. - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraints controls how pods are spread - across topology domains. - items: - description: TopologySpreadConstraint specifies how to spread matching - pods among the given topology. - properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. - As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. 
- - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - If this value is nil, the behavior is equivalent to the Honor policy. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - If this value is nil, the behavior is equivalent to the Ignore policy. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - volumeClaimTemplate: - description: |- - VolumeClaimTemplate allows customization of the PersistentVolumeClaim for Etcd data. - If specified, this takes precedence over StorageClassName and StorageSize. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. 
- More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes to consider - for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". 
The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName; - it can be changed after the claim is created. An empty string or nil value indicates that no - VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, - this field can be reset to its previous value (including nil) to cancel the modification. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - type: object - status: - description: status defines the observed state of Etcd - properties: - conditions: - description: Conditions represent the latest available observations - of the Etcd cluster's state. - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase.
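Per the volumeClaimTemplate description above, the template takes precedence over StorageClassName and StorageSize. A sketch of how the controller might resolve that precedence when building the StatefulSet claim; the EtcdSpec field types and the 10Gi fallback are assumptions, not code from this series (resource is k8s.io/apimachinery/pkg/api/resource):

// buildDataPVC is a sketch: prefer the user-supplied claim spec, otherwise
// fall back to a simple claim derived from the flat storage fields.
func buildDataPVC(spec multigresv1alpha1.EtcdSpec) corev1.PersistentVolumeClaim {
    pvcSpec := corev1.PersistentVolumeClaimSpec{
        AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
        Resources: corev1.VolumeResourceRequirements{ // ResourceRequirements on pre-1.29 APIs
            Requests: corev1.ResourceList{
                corev1.ResourceStorage: resource.MustParse("10Gi"), // assumed default size
            },
        },
    }
    if spec.VolumeClaimTemplate != nil { // assumed *corev1.PersistentVolumeClaimSpec
        pvcSpec = *spec.VolumeClaimTemplate
    } else if spec.StorageClassName != "" { // assumed plain string field
        pvcSpec.StorageClassName = &spec.StorageClassName
    }
    return corev1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "data"},
        Spec:       pvcSpec,
    }
}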
- maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: ObservedGeneration reflects the generation of the most - recently observed Etcd spec. - format: int64 - type: integer - ready: - description: Ready indicates whether the Etcd cluster is healthy and - available. - type: boolean - readyReplicas: - description: ReadyReplicas is the number of ready Etcd members. - format: int32 - type: integer - replicas: - description: Replicas is the desired number of Etcd members. - format: int32 - type: integer - required: - - ready - - readyReplicas - - replicas - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} From 0baa63a1f557270e4e021feac463b21ac4cb2bae Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:57:38 +0100 Subject: [PATCH 25/28] Add extra assertion to check updated resource --- .../controller/etcd/etcd_controller_test.go | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index c5fedd19..32038680 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -35,6 +35,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { // partial failures that don't prevent reconciliation success. wantErr bool wantRequeue bool + assertFunc func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) }{ "create all resources for new Etcd": { etcd: &multigresv1alpha1.Etcd{ @@ -55,6 +56,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, Spec: multigresv1alpha1.EtcdSpec{ Replicas: int32Ptr(5), + Image: "quay.io/coreos/etcd:v3.5.15", }, }, existingObjects: []client.Object{ @@ -64,7 +66,11 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { Namespace: "default", }, Spec: appsv1.StatefulSetSpec{ - Replicas: int32Ptr(3), // old value + Replicas: int32Ptr(3), // will be updated to 5 + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, }, }, &corev1.Service{ @@ -80,6 +86,24 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, }, }, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + sts := &appsv1.StatefulSet{} + err := c.Get(context.Background(), types.NamespacedName{ + Name: "existing-etcd", + Namespace: "default", + }, sts) + if err != nil { + t.Fatalf("Failed to get StatefulSet: %v", err) + } + + if *sts.Spec.Replicas != 5 { + t.Errorf("StatefulSet replicas = %d, want 5", *sts.Spec.Replicas) + } + + if sts.Spec.Template.Spec.Containers[0].Image != "quay.io/coreos/etcd:v3.5.15" { + t.Errorf("StatefulSet image = %s, want quay.io/coreos/etcd:v3.5.15", sts.Spec.Template.Spec.Containers[0].Image) + } + }, }, "etcd with cellName": { etcd: &multigresv1alpha1.Etcd{ @@ -524,6 +548,11 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { t.Errorf("Reconcile() result.Requeue = %v, want %v", result.Requeue, tc.wantRequeue) } + // Run custom assertions if provided + if tc.assertFunc != nil { + tc.assertFunc(t, fakeClient, tc.etcd) + } + // For success cases, verify all resources were created with correct labels expectedCellName := 
tc.etcd.Spec.CellName if expectedCellName == "" { From 066ece74f6c5c6ced4823e32fe46bef426184035 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 03:06:27 +0100 Subject: [PATCH 26/28] Add more assertion functions for success cases --- .../controller/etcd/etcd_controller_test.go | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 32038680..41f08b18 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -46,6 +46,29 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { Spec: multigresv1alpha1.EtcdSpec{}, }, existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + // Verify all three resources were created + sts := &appsv1.StatefulSet{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, sts); err != nil { + t.Errorf("StatefulSet should exist: %v", err) + } + + headlessSvc := &corev1.Service{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd-headless", Namespace: "default"}, headlessSvc); err != nil { + t.Errorf("Headless Service should exist: %v", err) + } + + clientSvc := &corev1.Service{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, clientSvc); err != nil { + t.Errorf("Client Service should exist: %v", err) + } + + // Verify default values were applied + // Note: Only checking replicas here - full resource validation is in statefulset_test.go + if *sts.Spec.Replicas != DefaultReplicas { + t.Errorf("StatefulSet replicas = %d, want default %d", *sts.Spec.Replicas, DefaultReplicas) + } + }, }, "update existing resources": { etcd: &multigresv1alpha1.Etcd{ @@ -116,6 +139,31 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, }, existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + sts := &appsv1.StatefulSet{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, sts); err != nil { + t.Fatalf("Failed to get StatefulSet: %v", err) + } + if sts.Labels["multigres.com/cell"] != "zone1" { + t.Errorf("StatefulSet cell label = %s, want zone1", sts.Labels["multigres.com/cell"]) + } + + headlessSvc := &corev1.Service{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "etcd-zone1-headless", Namespace: "default"}, headlessSvc); err != nil { + t.Fatalf("Failed to get headless Service: %v", err) + } + if headlessSvc.Labels["multigres.com/cell"] != "zone1" { + t.Errorf("Headless Service cell label = %s, want zone1", headlessSvc.Labels["multigres.com/cell"]) + } + + clientSvc := &corev1.Service{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, clientSvc); err != nil { + t.Fatalf("Failed to get client Service: %v", err) + } + if clientSvc.Labels["multigres.com/cell"] != "zone1" { + t.Errorf("Client Service cell label = %s, want zone1", clientSvc.Labels["multigres.com/cell"]) + } + }, }, "error on StatefulSet create": { etcd: &multigresv1alpha1.Etcd{ @@ -356,6 +404,16 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { Spec: multigresv1alpha1.EtcdSpec{}, }, existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + 
updatedEtcd := &multigresv1alpha1.Etcd{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, updatedEtcd); err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + + if slices.Contains(updatedEtcd.Finalizers, finalizerName) { + t.Errorf("Finalizer %s should be removed", finalizerName) + } + }, }, "all replicas ready status": { etcd: &multigresv1alpha1.Etcd{ @@ -383,6 +441,33 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, }, }, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + updatedEtcd := &multigresv1alpha1.Etcd{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd-ready", Namespace: "default"}, updatedEtcd); err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + + if !updatedEtcd.Status.Ready { + t.Error("Status.Ready should be true") + } + if updatedEtcd.Status.Replicas != 3 { + t.Errorf("Status.Replicas = %d, want 3", updatedEtcd.Status.Replicas) + } + if updatedEtcd.Status.ReadyReplicas != 3 { + t.Errorf("Status.ReadyReplicas = %d, want 3", updatedEtcd.Status.ReadyReplicas) + } + if len(updatedEtcd.Status.Conditions) == 0 { + t.Error("Status.Conditions should not be empty") + } else { + readyCondition := updatedEtcd.Status.Conditions[0] + if readyCondition.Type != "Ready" { + t.Errorf("Condition type = %s, want Ready", readyCondition.Type) + } + if readyCondition.Status != metav1.ConditionTrue { + t.Errorf("Condition status = %s, want True", readyCondition.Status) + } + } + }, }, "error on Get StatefulSet (not NotFound)": { etcd: &multigresv1alpha1.Etcd{ From 3a083dffe3f9fb6142ec8020fef3763c0d03d8a2 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 02:33:32 +0100 Subject: [PATCH 27/28] Ensure test cases have success assertion Some of the test cases have been moved around, formatted for better clarity, extra test spec added, and unused dependencies removed. 
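The assertions above pin down the status contract: Replicas, ReadyReplicas, Ready, and a Ready condition with reason and status. The producer side is not shown in these patches; a minimal sketch using the apimachinery condition helper, with the function and reason names assumed (meta is k8s.io/apimachinery/pkg/api/meta):

// updateStatus is a sketch: derive Etcd status from the observed StatefulSet
// and record the Ready condition the tests above assert on.
func updateStatus(etcd *multigresv1alpha1.Etcd, sts *appsv1.StatefulSet) {
    desired := int32(1)
    if sts.Spec.Replicas != nil {
        desired = *sts.Spec.Replicas
    }
    etcd.Status.Replicas = desired
    etcd.Status.ReadyReplicas = sts.Status.ReadyReplicas
    etcd.Status.Ready = sts.Status.ReadyReplicas == desired

    cond := metav1.Condition{
        Type:               "Ready",
        Status:             metav1.ConditionFalse,
        Reason:             "MembersNotReady",
        Message:            fmt.Sprintf("%d/%d etcd members ready", sts.Status.ReadyReplicas, desired),
        ObservedGeneration: etcd.Generation,
    }
    if etcd.Status.Ready {
        cond.Status = metav1.ConditionTrue
        cond.Reason = "AllMembersReady"
    }
    // SetStatusCondition keeps LastTransitionTime stable unless Status changes.
    meta.SetStatusCondition(&etcd.Status.Conditions, cond)
}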
--- .../controller/etcd/etcd_controller_test.go | 277 ++++++++---------- 1 file changed, 121 insertions(+), 156 deletions(-) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 41f08b18..0775a04a 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -1,8 +1,6 @@ package etcd import ( - "context" - "slices" "testing" appsv1 "k8s.io/api/apps/v1" @@ -37,6 +35,9 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantRequeue bool assertFunc func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) }{ + ////---------------------------------------- + /// Success + //------------------------------------------ "create all resources for new Etcd": { etcd: &multigresv1alpha1.Etcd{ ObjectMeta: metav1.ObjectMeta{ @@ -49,17 +50,23 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { // Verify all three resources were created sts := &appsv1.StatefulSet{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, sts); err != nil { + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd", Namespace: "default"}, + sts); err != nil { t.Errorf("StatefulSet should exist: %v", err) } headlessSvc := &corev1.Service{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd-headless", Namespace: "default"}, headlessSvc); err != nil { + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd-headless", Namespace: "default"}, + headlessSvc); err != nil { t.Errorf("Headless Service should exist: %v", err) } clientSvc := &corev1.Service{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, clientSvc); err != nil { + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd", Namespace: "default"}, + clientSvc); err != nil { t.Errorf("Client Service should exist: %v", err) } @@ -111,7 +118,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { sts := &appsv1.StatefulSet{} - err := c.Get(context.Background(), types.NamespacedName{ + err := c.Get(t.Context(), types.NamespacedName{ Name: "existing-etcd", Namespace: "default", }, sts) @@ -141,7 +148,9 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { existingObjects: []client.Object{}, assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { sts := &appsv1.StatefulSet{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, sts); err != nil { + if err := c.Get(t.Context(), + types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, + sts); err != nil { t.Fatalf("Failed to get StatefulSet: %v", err) } if sts.Labels["multigres.com/cell"] != "zone1" { @@ -149,7 +158,9 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { } headlessSvc := &corev1.Service{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "etcd-zone1-headless", Namespace: "default"}, headlessSvc); err != nil { + if err := c.Get(t.Context(), + types.NamespacedName{Name: "etcd-zone1-headless", Namespace: "default"}, + headlessSvc); err != nil { t.Fatalf("Failed to get headless Service: %v", err) } if headlessSvc.Labels["multigres.com/cell"] != "zone1" { @@ -157,7 +168,9 @@ func TestEtcdReconciler_Reconcile(t 
*testing.T) { } clientSvc := &corev1.Service{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, clientSvc); err != nil { + if err := c.Get(t.Context(), + types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, + clientSvc); err != nil { t.Fatalf("Failed to get client Service: %v", err) } if clientSvc.Labels["multigres.com/cell"] != "zone1" { @@ -165,6 +178,96 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { } }, }, + "deletion with finalizer": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{}, + }, + }, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + updatedEtcd := &multigresv1alpha1.Etcd{} + err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd-deletion", Namespace: "default"}, + updatedEtcd) + if err == nil { + t.Errorf("Etcd object should be deleted but still exists (finalizers: %v)", updatedEtcd.Finalizers) + } + }, + }, + "all replicas ready status": { + etcd: &multigresv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-ready", + Namespace: "default", + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.EtcdSpec{ + Replicas: int32Ptr(3), + }, + }, + existingObjects: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-etcd-ready", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: int32Ptr(3), + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + updatedEtcd := &multigresv1alpha1.Etcd{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-etcd-ready", Namespace: "default"}, + updatedEtcd); err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + + if !updatedEtcd.Status.Ready { + t.Error("Status.Ready should be true") + } + if updatedEtcd.Status.Replicas != 3 { + t.Errorf("Status.Replicas = %d, want 3", updatedEtcd.Status.Replicas) + } + if updatedEtcd.Status.ReadyReplicas != 3 { + t.Errorf("Status.ReadyReplicas = %d, want 3", updatedEtcd.Status.ReadyReplicas) + } + if len(updatedEtcd.Status.Conditions) == 0 { + t.Error("Status.Conditions should not be empty") + } else { + readyCondition := updatedEtcd.Status.Conditions[0] + if readyCondition.Type != "Ready" { + t.Errorf("Condition type = %s, want Ready", readyCondition.Type) + } + if readyCondition.Status != metav1.ConditionTrue { + t.Errorf("Condition status = %s, want True", readyCondition.Status) + } + } + }, + }, + ////---------------------------------------- + /// Error + //------------------------------------------ "error on StatefulSet create": { etcd: &multigresv1alpha1.Etcd{ ObjectMeta: metav1.ObjectMeta{ @@ -393,82 +496,6 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, wantErr: true, }, - "deletion with finalizer": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - DeletionTimestamp: &metav1.Time{Time: 
metav1.Now().Time}, - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - existingObjects: []client.Object{}, - assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { - updatedEtcd := &multigresv1alpha1.Etcd{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, updatedEtcd); err != nil { - t.Fatalf("Failed to get Etcd: %v", err) - } - - if slices.Contains(updatedEtcd.Finalizers, finalizerName) { - t.Errorf("Finalizer %s should be removed", finalizerName) - } - }, - }, - "all replicas ready status": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-ready", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.EtcdSpec{ - Replicas: int32Ptr(3), - }, - }, - existingObjects: []client.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-ready", - Namespace: "default", - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: int32Ptr(3), - }, - Status: appsv1.StatefulSetStatus{ - Replicas: 3, - ReadyReplicas: 3, - }, - }, - }, - assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { - updatedEtcd := &multigresv1alpha1.Etcd{} - if err := c.Get(context.Background(), types.NamespacedName{Name: "test-etcd-ready", Namespace: "default"}, updatedEtcd); err != nil { - t.Fatalf("Failed to get Etcd: %v", err) - } - - if !updatedEtcd.Status.Ready { - t.Error("Status.Ready should be true") - } - if updatedEtcd.Status.Replicas != 3 { - t.Errorf("Status.Replicas = %d, want 3", updatedEtcd.Status.Replicas) - } - if updatedEtcd.Status.ReadyReplicas != 3 { - t.Errorf("Status.ReadyReplicas = %d, want 3", updatedEtcd.Status.ReadyReplicas) - } - if len(updatedEtcd.Status.Conditions) == 0 { - t.Error("Status.Conditions should not be empty") - } else { - readyCondition := updatedEtcd.Status.Conditions[0] - if readyCondition.Type != "Ready" { - t.Errorf("Condition type = %s, want Ready", readyCondition.Type) - } - if readyCondition.Status != metav1.ConditionTrue { - t.Errorf("Condition status = %s, want True", readyCondition.Status) - } - } - }, - }, "error on Get StatefulSet (not NotFound)": { etcd: &multigresv1alpha1.Etcd{ ObjectMeta: metav1.ObjectMeta{ @@ -605,7 +632,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { } } if !etcdInExisting { - err := fakeClient.Create(context.Background(), tc.etcd) + err := fakeClient.Create(t.Context(), tc.etcd) if err != nil { t.Fatalf("Failed to create Etcd: %v", err) } @@ -619,7 +646,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, } - result, err := reconciler.Reconcile(context.Background(), req) + result, err := reconciler.Reconcile(t.Context(), req) if (err != nil) != tc.wantErr { t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) return @@ -628,79 +655,17 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { return } - // Check requeue - if result.Requeue != tc.wantRequeue { - t.Errorf("Reconcile() result.Requeue = %v, want %v", result.Requeue, tc.wantRequeue) - } + // NOTE: Check for requeue delay when we need to support such a setup.
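The NOTE above, together with the commented-out block that follows, tracks the move away from result.Requeue, which recent controller-runtime releases deprecate in favor of RequeueAfter; a reconciler that needs a delayed retry returns a non-zero duration. A sketch of the producer side, with the receiver type inferred from the test names (ctrl aliases sigs.k8s.io/controller-runtime):

// requeueIn is a sketch: returning a non-zero RequeueAfter is what the
// commented-out check below would observe on the Result.
func (r *EtcdReconciler) requeueIn(d time.Duration) (ctrl.Result, error) {
    return ctrl.Result{RequeueAfter: d}, nil
}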
+ _ = result + // // Check requeue + // if (result.RequeueAfter != 0) != tc.wantRequeue { + // t.Errorf("Reconcile() result.Requeue = %v, want %v", result.RequeueAfter, tc.wantRequeue) + // } // Run custom assertions if provided if tc.assertFunc != nil { tc.assertFunc(t, fakeClient, tc.etcd) } - - // For success cases, verify all resources were created with correct labels - expectedCellName := tc.etcd.Spec.CellName - if expectedCellName == "" { - expectedCellName = "multigres-global-topo" - } - - // Verify StatefulSet - sts := &appsv1.StatefulSet{} - err = fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, sts) - if err != nil { - t.Errorf("StatefulSet should exist, got error: %v", err) - } else { - if sts.Labels["multigres.com/cell"] != expectedCellName { - t.Errorf("StatefulSet cell label = %v, want %v", sts.Labels["multigres.com/cell"], expectedCellName) - } - if sts.Labels["app.kubernetes.io/component"] != "etcd" { - t.Errorf("StatefulSet component label = %v, want etcd", sts.Labels["app.kubernetes.io/component"]) - } - } - - // Verify headless Service - headlessSvc := &corev1.Service{} - err = fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name + "-headless", - Namespace: tc.etcd.Namespace, - }, headlessSvc) - if err != nil { - t.Errorf("Headless Service should exist, got error: %v", err) - } else { - if headlessSvc.Labels["multigres.com/cell"] != expectedCellName { - t.Errorf("Headless Service cell label = %v, want %v", headlessSvc.Labels["multigres.com/cell"], expectedCellName) - } - } - - // Verify client Service - clientSvc := &corev1.Service{} - err = fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, clientSvc) - if err != nil { - t.Errorf("Client Service should exist, got error: %v", err) - } else { - if clientSvc.Labels["multigres.com/cell"] != expectedCellName { - t.Errorf("Client Service cell label = %v, want %v", clientSvc.Labels["multigres.com/cell"], expectedCellName) - } - } - - // Verify finalizer - etcd := &multigresv1alpha1.Etcd{} - err = fakeClient.Get(context.Background(), types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, - }, etcd) - if err != nil { - t.Fatalf("Failed to get Etcd: %v", err) - } - if !slices.Contains(etcd.Finalizers, finalizerName) { - t.Errorf("Finalizer %s should be present", finalizerName) - } }) } } @@ -728,7 +693,7 @@ func TestEtcdReconciler_ReconcileNotFound(t *testing.T) { }, } - result, err := reconciler.Reconcile(context.Background(), req) + result, err := reconciler.Reconcile(t.Context(), req) if err != nil { t.Errorf("Reconcile() should not error on NotFound, got: %v", err) } From 5eecb766bad7717a4db19b131cfae295040c7c21 Mon Sep 17 00:00:00 2001 From: Ryota Date: Tue, 14 Oct 2025 03:28:05 +0100 Subject: [PATCH 28/28] Add some more tests for safety --- .../controller/etcd/etcd_controller_test.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/etcd/etcd_controller_test.go index 0775a04a..ccece394 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/etcd/etcd_controller_test.go @@ -1,6 +1,7 @@ package etcd import ( + "slices" "testing" appsv1 "k8s.io/api/apps/v1" @@ -70,10 +71,17 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { t.Errorf("Client Service should exist:
%v", err) } - // Verify default values were applied - // Note: Only checking replicas here - full resource validation is in statefulset_test.go + // Verify defaults and finalizer if *sts.Spec.Replicas != DefaultReplicas { - t.Errorf("StatefulSet replicas = %d, want default %d", *sts.Spec.Replicas, DefaultReplicas) + t.Errorf("StatefulSet replicas = %d, want %d", *sts.Spec.Replicas, DefaultReplicas) + } + + updatedEtcd := &multigresv1alpha1.Etcd{} + if err := c.Get(t.Context(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, updatedEtcd); err != nil { + t.Fatalf("Failed to get Etcd: %v", err) + } + if !slices.Contains(updatedEtcd.Finalizers, finalizerName) { + t.Errorf("Finalizer should be added") } }, }, @@ -263,6 +271,10 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { t.Errorf("Condition status = %s, want True", readyCondition.Status) } } + + if !slices.Contains(updatedEtcd.Finalizers, finalizerName) { + t.Errorf("Finalizer should be present") + } }, }, ////----------------------------------------