diff --git a/.github/workflows/_reusable-test-coverage.yaml b/.github/workflows/_reusable-test-coverage.yaml index 05fd8cc7..7b1301e2 100644 --- a/.github/workflows/_reusable-test-coverage.yaml +++ b/.github/workflows/_reusable-test-coverage.yaml @@ -44,7 +44,7 @@ jobs: fetch-depth: 0 # Fetch all history for changed-modules detection - name: Fetch base branch for comparison - run: | + run: | TARGET_BRANCH="${{ github.base_ref || 'main' }}" echo "Fetching comparison target: $TARGET_BRANCH" git fetch origin ${TARGET_BRANCH}:refs/remotes/origin/${TARGET_BRANCH} @@ -93,6 +93,11 @@ jobs: echo "Previous test coverage: Not found" fi + ## Setup Workspace for Tooling + # This forces Go to look at local directories for modules instead of trying to fetch them from online. + go work init + go work use -r . + ## Step 2. Check new coverage # Get into directory so that go tool cover can work go tool cover -func=coverage/combined.out > /tmp/coverage.txt @@ -108,15 +113,16 @@ jobs: # change is made, the previous test reports will be minimised, leaving the # only relevant comment. - name: Check and report - uses: actions/github-script@v6 # Based on Node.js v16 - if: always() && - github.event_name == 'pull_request' + uses: actions/github-script@v6 + if: always() && github.event_name == 'pull_request' + env: + COVERAGE_THRESHOLD: ${{ inputs.COVERAGE_THRESHOLD }} with: retries: 3 script: | const fs = require('fs/promises') - // 1. Retrieve existing bot comments for the PR + // 1. Retrieve existing bot comments const { data: comments } = await github.rest.issues.listComments({ ...context.repo, issue_number: context.issue.number, @@ -126,23 +132,44 @@ jobs: comment.body.includes('Go Test Coverage Report') }); - // 2. Prepare comment - const report = await fs.readFile('/tmp/coverage.txt') - const overallStatus = - ${{ inputs.COVERAGE_THRESHOLD }} > ${{ env.NEW_COVERAGE }} ? - "❌ FAIL: Coverage less than threshold of `${{ inputs.COVERAGE_THRESHOLD }}`" : - ${{ env.PREV_COVERAGE || '0' }} > ${{ env.NEW_COVERAGE }} ? - "❌ FAIL: Coverage less than the previous run" : - "✅ PASS" + // 2. Safe Value Parsing + const threshold = parseFloat(process.env.COVERAGE_THRESHOLD) || 0; + const newCovStr = process.env.NEW_COVERAGE; + const prevCovStr = process.env.PREV_COVERAGE; + + const newCov = parseFloat(newCovStr); // NaN if undefined/empty + const prevCov = parseFloat(prevCovStr) || 0; + + let overallStatus = "⚠️ SKIPPED (Build Failed?)"; + + // Only calculate status if we actually have coverage data + if (!isNaN(newCov)) { + if (threshold > newCov) { + overallStatus = `❌ FAIL: Coverage (${newCov}%) < Threshold (${threshold}%)`; + } else if (prevCov > newCov) { + overallStatus = `❌ FAIL: Coverage decreased (${prevCov}% -> ${newCov}%)`; + } else { + overallStatus = "✅ PASS"; + } + } + + // 3. 
Read Report or Default Message + let reportContent = "No coverage report generated (Build likely failed)."; + try { + reportContent = await fs.readFile('/tmp/coverage.txt', 'utf8'); + } catch (e) { + console.log("Could not read coverage file:", e.message); + } + const comment = `### 🔬 Go Test Coverage Report #### Summary | Coverage Type | Result | | ---------------------- | -------------------------------------- | - | Threshold | ${{ inputs.COVERAGE_THRESHOLD }}% | - | Previous Test Coverage | ${{ env.PREV_COVERAGE || 'Unknown' }}% | - | New Test Coverage | ${{ env.NEW_COVERAGE }}% | + | Threshold | ${threshold}% | + | Previous Test Coverage | ${prevCovStr || 'Unknown'}% | + | New Test Coverage | ${newCovStr || 'Unknown'}% | #### Status @@ -153,14 +180,14 @@ jobs:
Show New Coverage \`\`\` - ${report}\`\`\` + ${reportContent} + \`\`\`
`; - // 3. If there are any old comments, minimize all of them first. + // 4. Minimize old comments for (const botComment of botComments) { - core.notice("There was an old comment found in the PR, minimizing it.") const query = `mutation { minimizeComment(input: {classifier: OUTDATED, subjectId: "${botComment.node_id}"}) { clientMutationId @@ -169,7 +196,7 @@ jobs: await github.graphql(query) } - // 4. Create a comment with the coverage report + // 5. Create new comment github.rest.issues.createComment({ ...context.repo, issue_number: context.issue.number, @@ -178,7 +205,7 @@ jobs: # Exit with non-zero value if the test coverage has decreased or not # reached the threshold. -- name: Check coverage status + - name: Check coverage status run: | echo "Coverage Threshold: ${{ inputs.COVERAGE_THRESHOLD }}%" echo "Previous test coverage: ${{ env.PREV_COVERAGE || 'Unknown' }}%" @@ -226,5 +253,4 @@ jobs: path: | ~/.cache/coverage.txt ~/.cache/go-build - ~/go/pkg/mod - + ~/go/pkg/mod \ No newline at end of file diff --git a/.gitignore b/.gitignore index fbf16aa6..75ca3ad8 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,7 @@ bin/ # kind local development kubeconfig.yaml + + +# MacOS +.DS_Store \ No newline at end of file diff --git a/api/v1alpha1/cell_types.go b/api/v1alpha1/cell_types.go index a7ae9de8..83e6b7f5 100644 --- a/api/v1alpha1/cell_types.go +++ b/api/v1alpha1/cell_types.go @@ -57,11 +57,11 @@ type CellSpec struct { // TopoServer defines the local topology config. // +optional - TopoServer LocalTopoServerSpec `json:"topoServer,omitempty"` + TopoServer *LocalTopoServerSpec `json:"topoServer,omitempty"` // AllCells list for discovery. // +optional - // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:MaxItems=50 AllCells []CellName `json:"allCells,omitempty"` // TopologyReconciliation flags. @@ -71,7 +71,10 @@ type CellSpec struct { // TopologyReconciliation defines flags for the cell controller. type TopologyReconciliation struct { + // RegisterCell indicates if the cell should register itself in the topology. RegisterCell bool `json:"registerCell"` + + // PrunePoolers indicates if unused poolers should be removed. PrunePoolers bool `json:"prunePoolers"` } @@ -85,8 +88,13 @@ type CellStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - GatewayReplicas int32 `json:"gatewayReplicas"` + // GatewayReplicas is the total number of gateway pods. + GatewayReplicas int32 `json:"gatewayReplicas"` + + // GatewayReadyReplicas is the number of gateway pods that are ready. GatewayReadyReplicas int32 `json:"gatewayReadyReplicas"` + + // GatewayServiceName is the DNS name of the gateway service. // +kubebuilder:validation:MaxLength=253 GatewayServiceName string `json:"gatewayServiceName,omitempty"` } diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index a66cac64..c295eb77 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -45,13 +45,13 @@ type StatelessSpec struct { // PodAnnotations are annotations to add to the pods. // +optional - // +kubebuilder:validation:MaxProperties=64 + // +kubebuilder:validation:MaxProperties=20 // +kubebuilder:validation:XValidation:rule="self.all(k, size(k) < 64 && size(self[k]) < 256)",message="annotation keys must be <64 chars and values <256 chars" PodAnnotations map[string]string `json:"podAnnotations,omitempty"` // PodLabels are additional labels to add to the pods. 
// +optional - // +kubebuilder:validation:MaxProperties=64 + // +kubebuilder:validation:MaxProperties=20 // +kubebuilder:validation:XValidation:rule="self.all(k, size(k) < 64 && size(self[k]) < 64)",message="label keys and values must be <64 chars" PodLabels map[string]string `json:"podLabels,omitempty"` } diff --git a/api/v1alpha1/multigrescluster_types.go b/api/v1alpha1/multigrescluster_types.go index 8934e222..31e27926 100644 --- a/api/v1alpha1/multigrescluster_types.go +++ b/api/v1alpha1/multigrescluster_types.go @@ -48,7 +48,7 @@ type MultigresClusterSpec struct { // +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:MaxItems=50 Cells []CellConfig `json:"cells,omitempty"` // Databases defines the logical databases, table groups, and sharding. @@ -56,7 +56,7 @@ type MultigresClusterSpec struct { // +listType=map // +listMapKey=name // +kubebuilder:validation:XValidation:rule="self.filter(x, has(x.default) && x.default).size() <= 1",message="only one database can be marked as default" - // +kubebuilder:validation:MaxItems=500 + // +kubebuilder:validation:MaxItems=50 Databases []DatabaseConfig `json:"databases,omitempty"` } @@ -218,7 +218,7 @@ type DatabaseConfig struct { // +listType=map // +listMapKey=name // +kubebuilder:validation:XValidation:rule="self.filter(x, has(x.default) && x.default).size() <= 1",message="only one tablegroup can be marked as default" - // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:MaxItems=20 TableGroups []TableGroupConfig `json:"tablegroups,omitempty"` } @@ -237,7 +237,7 @@ type TableGroupConfig struct { // +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=128 + // +kubebuilder:validation:MaxItems=32 Shards []ShardConfig `json:"shards,omitempty"` } @@ -272,7 +272,7 @@ type ShardOverrides struct { // Pools overrides. Keyed by pool name. // +optional - // +kubebuilder:validation:MaxProperties=32 + // +kubebuilder:validation:MaxProperties=8 // +kubebuilder:validation:XValidation:rule="self.all(key, size(key) < 63)",message="pool names must be < 63 chars" Pools map[string]PoolSpec `json:"pools,omitempty"` } @@ -285,7 +285,7 @@ type ShardInlineSpec struct { // Pools configuration. Keyed by pool name. // +optional - // +kubebuilder:validation:MaxProperties=32 + // +kubebuilder:validation:MaxProperties=8 // +kubebuilder:validation:XValidation:rule="self.all(key, size(key) < 63)",message="pool names must be < 63 chars" Pools map[string]PoolSpec `json:"pools,omitempty"` } @@ -305,13 +305,13 @@ type MultigresClusterStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` // Cells status summary. // +optional - // +kubebuilder:validation:MaxProperties=100 + // +kubebuilder:validation:MaxProperties=50 // +kubebuilder:validation:XValidation:rule="self.all(key, size(key) < 63)",message="cell names must be < 63 chars" Cells map[string]CellStatusSummary `json:"cells,omitempty"` // Databases status summary. 
// +optional - // +kubebuilder:validation:MaxProperties=500 + // +kubebuilder:validation:MaxProperties=50 // +kubebuilder:validation:XValidation:rule="self.all(key, size(key) < 63)",message="database names must be < 63 chars" Databases map[string]DatabaseStatusSummary `json:"databases,omitempty"` } diff --git a/api/v1alpha1/shard_types.go b/api/v1alpha1/shard_types.go index 7c9ddbe4..a21dbaa5 100644 --- a/api/v1alpha1/shard_types.go +++ b/api/v1alpha1/shard_types.go @@ -36,7 +36,7 @@ type MultiOrchSpec struct { // Cells defines the list of cells where this MultiOrch should be deployed. // If empty, it defaults to all cells where pools are defined. // +optional - // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:MaxItems=50 Cells []CellName `json:"cells,omitempty"` } @@ -49,7 +49,7 @@ type PoolSpec struct { // Cells defines the list of cells where this Pool should be deployed. // +optional - // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:MaxItems=50 Cells []CellName `json:"cells,omitempty"` // ReplicasPerCell is the desired number of pods PER CELL in this pool. @@ -83,34 +83,44 @@ type PoolSpec struct { // ShardSpec defines the desired state of Shard. type ShardSpec struct { + // DatabaseName is the name of the logical database this shard belongs to. // +kubebuilder:validation:MaxLength=63 DatabaseName string `json:"databaseName"` + + // TableGroupName is the name of the table group this shard belongs to. // +kubebuilder:validation:MaxLength=63 TableGroupName string `json:"tableGroupName"` + + // ShardName is the specific identifier for this shard (e.g. "0"). // +kubebuilder:validation:MaxLength=63 ShardName string `json:"shardName"` - // Images required. + // Images defines the container images to be used by this shard (defined globally at MultigresCluster). Images ShardImages `json:"images"` - // GlobalTopoServer reference. + // GlobalTopoServer is a reference to the global topology server. GlobalTopoServer GlobalTopoServerRef `json:"globalTopoServer"` - // MultiOrch fully resolved spec. + // MultiOrch is the fully resolved configuration for the shard orchestrator. MultiOrch MultiOrchSpec `json:"multiorch"` - // Pools fully resolved spec. - // +kubebuilder:validation:MaxProperties=32 + // Pools is the map of fully resolved data pool configurations. + // +kubebuilder:validation:MaxProperties=8 // +kubebuilder:validation:XValidation:rule="self.all(key, size(key) < 63)",message="pool names must be < 63 chars" Pools map[string]PoolSpec `json:"pools"` } // ShardImages defines the images required for a Shard. type ShardImages struct { + // MultiOrch is the image for the shard orchestrator. // +kubebuilder:validation:MaxLength=512 MultiOrch string `json:"multiorch"` + + // MultiPooler is the image for the connection pooler sidecar. // +kubebuilder:validation:MaxLength=512 MultiPooler string `json:"multipooler"` + + // Postgres is the image for the postgres database. // +kubebuilder:validation:MaxLength=512 Postgres string `json:"postgres"` } @@ -127,10 +137,13 @@ type ShardStatus struct { // Cells is a list of cells this shard is currently deployed to. // +optional - // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:MaxItems=50 Cells []CellName `json:"cells,omitempty"` - OrchReady bool `json:"orchReady"` + // OrchReady indicates if the MultiOrch component is ready. + OrchReady bool `json:"orchReady"` + + // PoolsReady indicates if all data pools are ready. 
PoolsReady bool `json:"poolsReady"` } diff --git a/api/v1alpha1/shardtemplate_types.go b/api/v1alpha1/shardtemplate_types.go index 52f89b40..561972f1 100644 --- a/api/v1alpha1/shardtemplate_types.go +++ b/api/v1alpha1/shardtemplate_types.go @@ -31,7 +31,7 @@ type ShardTemplateSpec struct { MultiOrch *MultiOrchSpec `json:"multiorch,omitempty"` // +optional - // +kubebuilder:validation:MaxProperties=32 + // +kubebuilder:validation:MaxProperties=8 // +kubebuilder:validation:XValidation:rule="self.all(key, size(key) < 63)",message="pool names must be < 63 chars" Pools map[string]PoolSpec `json:"pools,omitempty"` } diff --git a/api/v1alpha1/tablegroup_types.go b/api/v1alpha1/tablegroup_types.go index 5ba734f2..a43a8f5e 100644 --- a/api/v1alpha1/tablegroup_types.go +++ b/api/v1alpha1/tablegroup_types.go @@ -29,8 +29,11 @@ import ( // TableGroupSpec defines the desired state of TableGroup. type TableGroupSpec struct { + // DatabaseName is the name of the logical database. // +kubebuilder:validation:MaxLength=63 DatabaseName string `json:"databaseName"` + + // TableGroupName is the name of this table group. // +kubebuilder:validation:MaxLength=63 TableGroupName string `json:"tableGroupName"` @@ -38,28 +41,29 @@ type TableGroupSpec struct { // +optional IsDefault bool `json:"default,omitempty"` - // Images required for child shards. + // Images defines the container images used for child shards - defined globally in MultigresCluster. Images ShardImages `json:"images"` - // GlobalTopoServer reference. + // GlobalTopoServer is a reference to the global topology server. GlobalTopoServer GlobalTopoServerRef `json:"globalTopoServer"` // Shards is the list of FULLY RESOLVED shard specifications. - // +kubebuilder:validation:MaxItems=128 + // +kubebuilder:validation:MaxItems=32 Shards []ShardResolvedSpec `json:"shards"` } // ShardResolvedSpec represents the fully calculated spec for a shard, // pushed down to the TableGroup. type ShardResolvedSpec struct { + // Name is the identifier of the shard. // +kubebuilder:validation:MaxLength=63 Name string `json:"name"` - // MultiOrch fully resolved spec. + // MultiOrch is the fully resolved configuration for the orchestrator. MultiOrch MultiOrchSpec `json:"multiorch"` - // Pools fully resolved spec. - // +kubebuilder:validation:MaxProperties=32 + // Pools is the map of fully resolved data pool configurations. + // +kubebuilder:validation:MaxProperties=8 // +kubebuilder:validation:XValidation:rule="self.all(key, size(key) < 63)",message="pool names must be < 63 chars" Pools map[string]PoolSpec `json:"pools"` } diff --git a/api/v1alpha1/toposerver_types.go b/api/v1alpha1/toposerver_types.go index 55a6f2db..248754a6 100644 --- a/api/v1alpha1/toposerver_types.go +++ b/api/v1alpha1/toposerver_types.go @@ -141,12 +141,17 @@ type LocalTopoServerSpec struct { // GlobalTopoServerRef defines a reference to the global topo server. // Used by Cell, TableGroup, and Shard. type GlobalTopoServerRef struct { + // Address is the DNS address of the topology server. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 Address string `json:"address"` + + // RootPath is the etcd prefix for this cluster. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 RootPath string `json:"rootPath"` + + // Implementation defines the client implementation (e.g. "etcd"). 
// +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 Implementation string `json:"implementation"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a81a085e..e1f62493 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -140,7 +140,11 @@ func (in *CellSpec) DeepCopyInto(out *CellSpec) { *out = *in in.MultiGateway.DeepCopyInto(&out.MultiGateway) out.GlobalTopoServer = in.GlobalTopoServer - in.TopoServer.DeepCopyInto(&out.TopoServer) + if in.TopoServer != nil { + in, out := &in.TopoServer, &out.TopoServer + *out = new(LocalTopoServerSpec) + (*in).DeepCopyInto(*out) + } if in.AllCells != nil { in, out := &in.AllCells, &out.AllCells *out = make([]CellName, len(*in)) diff --git a/config/crd/bases/multigres.com_cells.yaml b/config/crd/bases/multigres.com_cells.yaml index e3fc116d..3ac018c3 100644 --- a/config/crd/bases/multigres.com_cells.yaml +++ b/config/crd/bases/multigres.com_cells.yaml @@ -54,20 +54,24 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array globalTopoServer: description: GlobalTopoServer reference (always populated). properties: address: + description: Address is the DNS address of the topology server. maxLength: 512 minLength: 1 type: string implementation: + description: Implementation defines the client implementation + (e.g. "etcd"). maxLength: 63 minLength: 1 type: string rootPath: + description: RootPath is the etcd prefix for this cluster. maxLength: 512 minLength: 1 type: string @@ -1002,7 +1006,7 @@ spec: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values <256 chars @@ -1011,7 +1015,7 @@ spec: additionalProperties: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars @@ -1225,8 +1229,12 @@ spec: description: TopologyReconciliation flags. properties: prunePoolers: + description: PrunePoolers indicates if unused poolers should be + removed. type: boolean registerCell: + description: RegisterCell indicates if the cell should register + itself in the topology. type: boolean required: - prunePoolers @@ -1306,12 +1314,16 @@ spec: type: object type: array gatewayReadyReplicas: + description: GatewayReadyReplicas is the number of gateway pods that + are ready. format: int32 type: integer gatewayReplicas: + description: GatewayReplicas is the total number of gateway pods. format: int32 type: integer gatewayServiceName: + description: GatewayServiceName is the DNS name of the gateway service. maxLength: 253 type: string required: diff --git a/config/crd/bases/multigres.com_celltemplates.yaml b/config/crd/bases/multigres.com_celltemplates.yaml index 400b1203..c86792db 100644 --- a/config/crd/bases/multigres.com_celltemplates.yaml +++ b/config/crd/bases/multigres.com_celltemplates.yaml @@ -1093,7 +1093,7 @@ spec: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values <256 chars @@ -1102,7 +1102,7 @@ spec: additionalProperties: type: string description: PodLabels are additional labels to add to the pods. 
- maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars diff --git a/config/crd/bases/multigres.com_coretemplates.yaml b/config/crd/bases/multigres.com_coretemplates.yaml index 6f1ad057..ec176f78 100644 --- a/config/crd/bases/multigres.com_coretemplates.yaml +++ b/config/crd/bases/multigres.com_coretemplates.yaml @@ -1062,7 +1062,7 @@ spec: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values <256 chars @@ -1071,7 +1071,7 @@ spec: additionalProperties: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars diff --git a/config/crd/bases/multigres.com_multigresclusters.yaml b/config/crd/bases/multigres.com_multigresclusters.yaml index 954c4083..e0c1bf49 100644 --- a/config/crd/bases/multigres.com_multigresclusters.yaml +++ b/config/crd/bases/multigres.com_multigresclusters.yaml @@ -1007,7 +1007,7 @@ spec: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values @@ -1019,7 +1019,7 @@ spec: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars @@ -2175,7 +2175,7 @@ spec: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values @@ -2187,7 +2187,7 @@ spec: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars @@ -2274,7 +2274,7 @@ spec: rule: '!(has(self.spec) && has(self.cellTemplate))' - message: must specify either 'zone' or 'region', but not both rule: has(self.zone) != has(self.region) - maxItems: 100 + maxItems: 50 type: array x-kubernetes-list-map-keys: - name @@ -3312,14 +3312,14 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array podAnnotations: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars @@ -3331,7 +3331,7 @@ spec: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be @@ -4411,7 +4411,7 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array multipooler: description: Multipooler container configuration. @@ -4576,7 +4576,7 @@ spec: type: object description: Pools overrides. Keyed by pool name. 
- maxProperties: 32 + maxProperties: 8 type: object x-kubernetes-validations: - message: pool names must be < 63 chars @@ -5580,14 +5580,14 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array podAnnotations: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars @@ -5599,7 +5599,7 @@ spec: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be @@ -6679,7 +6679,7 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array multipooler: description: Multipooler container configuration. @@ -6844,7 +6844,7 @@ spec: type: object description: Pools configuration. Keyed by pool name. - maxProperties: 32 + maxProperties: 8 type: object x-kubernetes-validations: - message: pool names must be < 63 chars @@ -6856,7 +6856,7 @@ spec: x-kubernetes-validations: - message: cannot specify both 'spec' and 'shardTemplate' rule: '!(has(self.spec) && has(self.shardTemplate))' - maxItems: 128 + maxItems: 32 type: array x-kubernetes-list-map-keys: - name @@ -6864,7 +6864,7 @@ spec: required: - name type: object - maxItems: 100 + maxItems: 20 type: array x-kubernetes-list-map-keys: - name @@ -6876,7 +6876,7 @@ spec: required: - name type: object - maxItems: 500 + maxItems: 50 type: array x-kubernetes-list-map-keys: - name @@ -8007,7 +8007,7 @@ spec: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values <256 @@ -8018,7 +8018,7 @@ spec: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars @@ -8140,7 +8140,7 @@ spec: - ready type: object description: Cells status summary. - maxProperties: 100 + maxProperties: 50 type: object x-kubernetes-validations: - message: cell names must be < 63 chars @@ -8218,7 +8218,7 @@ spec: - totalShards type: object description: Databases status summary. - maxProperties: 500 + maxProperties: 50 type: object x-kubernetes-validations: - message: database names must be < 63 chars diff --git a/config/crd/bases/multigres.com_shards.yaml b/config/crd/bases/multigres.com_shards.yaml index 44de0b83..5da1519f 100644 --- a/config/crd/bases/multigres.com_shards.yaml +++ b/config/crd/bases/multigres.com_shards.yaml @@ -44,20 +44,27 @@ spec: description: ShardSpec defines the desired state of Shard. properties: databaseName: + description: DatabaseName is the name of the logical database this + shard belongs to. maxLength: 63 type: string globalTopoServer: - description: GlobalTopoServer reference. + description: GlobalTopoServer is a reference to the global topology + server. properties: address: + description: Address is the DNS address of the topology server. maxLength: 512 minLength: 1 type: string implementation: + description: Implementation defines the client implementation + (e.g. "etcd"). maxLength: 63 minLength: 1 type: string rootPath: + description: RootPath is the etcd prefix for this cluster. 
maxLength: 512 minLength: 1 type: string @@ -67,15 +74,20 @@ spec: - rootPath type: object images: - description: Images required. + description: Images defines the container images to be used by this + shard (defined globally at MultigresCluster). properties: multiorch: + description: MultiOrch is the image for the shard orchestrator. maxLength: 512 type: string multipooler: + description: MultiPooler is the image for the connection pooler + sidecar. maxLength: 512 type: string postgres: + description: Postgres is the image for the postgres database. maxLength: 512 type: string required: @@ -84,7 +96,8 @@ spec: - postgres type: object multiorch: - description: MultiOrch fully resolved spec. + description: MultiOrch is the fully resolved configuration for the + shard orchestrator. properties: affinity: description: Affinity defines the pod's scheduling constraints. @@ -1015,13 +1028,13 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array podAnnotations: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values <256 chars @@ -1030,7 +1043,7 @@ spec: additionalProperties: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars @@ -2034,7 +2047,7 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array multipooler: description: Multipooler container configuration. @@ -2192,16 +2205,20 @@ spec: - readOnly type: string type: object - description: Pools fully resolved spec. - maxProperties: 32 + description: Pools is the map of fully resolved data pool configurations. + maxProperties: 8 type: object x-kubernetes-validations: - message: pool names must be < 63 chars rule: self.all(key, size(key) < 63) shardName: + description: ShardName is the specific identifier for this shard (e.g. + "0"). maxLength: 63 type: string tableGroupName: + description: TableGroupName is the name of the table group this shard + belongs to. maxLength: 63 type: string required: @@ -2225,7 +2242,7 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array conditions: description: Conditions represent the latest available observations. @@ -2285,8 +2302,10 @@ spec: type: object type: array orchReady: + description: OrchReady indicates if the MultiOrch component is ready. type: boolean poolsReady: + description: PoolsReady indicates if all data pools are ready. type: boolean required: - orchReady diff --git a/config/crd/bases/multigres.com_shardtemplates.yaml b/config/crd/bases/multigres.com_shardtemplates.yaml index 66cc41c9..6e6705c2 100644 --- a/config/crd/bases/multigres.com_shardtemplates.yaml +++ b/config/crd/bases/multigres.com_shardtemplates.yaml @@ -972,13 +972,13 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array podAnnotations: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values <256 chars @@ -987,7 +987,7 @@ spec: additionalProperties: type: string description: PodLabels are additional labels to add to the pods. 
- maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars @@ -1991,7 +1991,7 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array multipooler: description: Multipooler container configuration. @@ -2149,7 +2149,7 @@ spec: - readOnly type: string type: object - maxProperties: 32 + maxProperties: 8 type: object x-kubernetes-validations: - message: pool names must be < 63 chars diff --git a/config/crd/bases/multigres.com_tablegroups.yaml b/config/crd/bases/multigres.com_tablegroups.yaml index 30de1bb5..b4c3afcb 100644 --- a/config/crd/bases/multigres.com_tablegroups.yaml +++ b/config/crd/bases/multigres.com_tablegroups.yaml @@ -44,6 +44,7 @@ spec: description: TableGroupSpec defines the desired state of TableGroup. properties: databaseName: + description: DatabaseName is the name of the logical database. maxLength: 63 type: string default: @@ -51,17 +52,22 @@ spec: group for the database. type: boolean globalTopoServer: - description: GlobalTopoServer reference. + description: GlobalTopoServer is a reference to the global topology + server. properties: address: + description: Address is the DNS address of the topology server. maxLength: 512 minLength: 1 type: string implementation: + description: Implementation defines the client implementation + (e.g. "etcd"). maxLength: 63 minLength: 1 type: string rootPath: + description: RootPath is the etcd prefix for this cluster. maxLength: 512 minLength: 1 type: string @@ -71,15 +77,20 @@ spec: - rootPath type: object images: - description: Images required for child shards. + description: Images defines the container images used for child shards + - defined globally in MultigresCluster. properties: multiorch: + description: MultiOrch is the image for the shard orchestrator. maxLength: 512 type: string multipooler: + description: MultiPooler is the image for the connection pooler + sidecar. maxLength: 512 type: string postgres: + description: Postgres is the image for the postgres database. maxLength: 512 type: string required: @@ -95,7 +106,8 @@ spec: pushed down to the TableGroup. properties: multiorch: - description: MultiOrch fully resolved spec. + description: MultiOrch is the fully resolved configuration for + the orchestrator. properties: affinity: description: Affinity defines the pod's scheduling constraints. @@ -1031,14 +1043,14 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array podAnnotations: additionalProperties: type: string description: PodAnnotations are annotations to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: annotation keys must be <64 chars and values @@ -1049,7 +1061,7 @@ spec: type: string description: PodLabels are additional labels to add to the pods. - maxProperties: 64 + maxProperties: 20 type: object x-kubernetes-validations: - message: label keys and values must be <64 chars @@ -1121,6 +1133,7 @@ spec: type: object type: object name: + description: Name is the identifier of the shard. maxLength: 63 type: string pools: @@ -2065,7 +2078,7 @@ spec: maxLength: 63 minLength: 1 type: string - maxItems: 100 + maxItems: 50 type: array multipooler: description: Multipooler container configuration. @@ -2225,8 +2238,8 @@ spec: - readOnly type: string type: object - description: Pools fully resolved spec. - maxProperties: 32 + description: Pools is the map of fully resolved data pool configurations. 
+ maxProperties: 8 type: object x-kubernetes-validations: - message: pool names must be < 63 chars @@ -2236,9 +2249,10 @@ spec: - name - pools type: object - maxItems: 128 + maxItems: 32 type: array tableGroupName: + description: TableGroupName is the name of this table group. maxLength: 63 type: string required: diff --git a/pkg/cluster-handler/controller/multigrescluster/constants.go b/pkg/cluster-handler/controller/multigrescluster/constants.go new file mode 100644 index 00000000..fc993295 --- /dev/null +++ b/pkg/cluster-handler/controller/multigrescluster/constants.go @@ -0,0 +1,23 @@ +package multigrescluster + +// NOTE: We may want to consider moving this to different module/package before implementing the Mutating Webhook. +// This separation is critical to prevent circular dependencies between the Webhook and Controller packages +// and ensures that the "Level 4" defaulting logic is reusable as a Single Source of Truth for both the reconciliation loop +// and admission requests. + +const ( + // DefaultEtcdReplicas is the default number of replicas for the managed Etcd cluster if not specified. + DefaultEtcdReplicas int32 = 3 + + // DefaultAdminReplicas is the default number of replicas for the MultiAdmin deployment if not specified. + DefaultAdminReplicas int32 = 1 + + // FallbackCoreTemplate is the name of the template to look for if no specific template is referenced. + FallbackCoreTemplate = "default" + + // FallbackCellTemplate is the name of the template to look for if no specific template is referenced. + FallbackCellTemplate = "default" + + // FallbackShardTemplate is the name of the template to look for if no specific template is referenced. + FallbackShardTemplate = "default" +) diff --git a/pkg/cluster-handler/controller/multigrescluster/dummy.go b/pkg/cluster-handler/controller/multigrescluster/dummy.go deleted file mode 100644 index f8681eea..00000000 --- a/pkg/cluster-handler/controller/multigrescluster/dummy.go +++ /dev/null @@ -1,5 +0,0 @@ -package multigrescluster - -func Dummy() string { - return "dummy string from cluster-handler's multigrescluster controller" -} diff --git a/pkg/cluster-handler/controller/multigrescluster/integration_test.go b/pkg/cluster-handler/controller/multigrescluster/integration_test.go new file mode 100644 index 00000000..442ec512 --- /dev/null +++ b/pkg/cluster-handler/controller/multigrescluster/integration_test.go @@ -0,0 +1,331 @@ +//go:build integration +// +build integration + +package multigrescluster_test + +import ( + "path/filepath" + "slices" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/cluster-handler/controller/multigrescluster" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func TestSetupWithManager(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + if err := (&multigrescluster.MultigresClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr, 
controller.Options{ + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } +} + +func TestMultigresClusterReconciliation(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + const ( + clusterName = "test-integration-cluster" + namespace = "default" + ) + + tests := map[string]struct { + cluster *multigresv1alpha1.MultigresCluster + wantResources []client.Object + }{ + "full cluster integration": { + cluster: &multigresv1alpha1.MultigresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: multigresv1alpha1.MultigresClusterSpec{ + Images: multigresv1alpha1.ClusterImages{ + MultiGateway: "gateway:latest", + MultiOrch: "orch:latest", + MultiPooler: "pooler:latest", + MultiAdmin: "admin:latest", + Postgres: "postgres:15", + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "etcd:latest"}, + }, + MultiAdmin: multigresv1alpha1.MultiAdminConfig{ + Spec: &multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + }, + Cells: []multigresv1alpha1.CellConfig{ + {Name: "zone-a", Zone: "us-east-1a", Spec: &multigresv1alpha1.CellInlineSpec{ + MultiGateway: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + }}, + }, + Databases: []multigresv1alpha1.DatabaseConfig{ + { + Name: "db1", + TableGroups: []multigresv1alpha1.TableGroupConfig{ + {Name: "tg1", Shards: []multigresv1alpha1.ShardConfig{{ + Name: "s1", + Spec: &multigresv1alpha1.ShardInlineSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{StatelessSpec: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}}, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "primary": { + ReplicasPerCell: ptr.To(int32(1)), + Type: "readWrite", + // We must explicitly assign cells so they propagate to MultiOrch + Cells: []multigresv1alpha1.CellName{"zone-a"}, + }, + }, + }, + }}}, + }, + }, + }, + }, + }, + wantResources: []client.Object{ + // Note: We verify child resources first. Parent finalizer is checked manually below. + + // 1. Global TopoServer + &multigresv1alpha1.TopoServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-global-topo", + Namespace: namespace, + Labels: clusterLabels(t, clusterName, "", ""), + OwnerReferences: clusterOwnerRefs(t, clusterName), + }, + Spec: multigresv1alpha1.TopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{ + Image: "etcd:latest", + Replicas: ptr.To(int32(3)), // Default from logic + }, + }, + }, + // 2. MultiAdmin Deployment + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-multiadmin", + Namespace: namespace, + Labels: clusterLabels(t, clusterName, "multiadmin", ""), + OwnerReferences: clusterOwnerRefs(t, clusterName), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: clusterLabels(t, clusterName, "multiadmin", ""), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: clusterLabels(t, clusterName, "multiadmin", ""), + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multiadmin", + Image: "admin:latest", + }, + }, + }, + }, + }, + }, + // 3. 
Cell + &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-zone-a", + Namespace: namespace, + Labels: clusterLabels(t, clusterName, "", "zone-a"), + OwnerReferences: clusterOwnerRefs(t, clusterName), + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone-a", + Zone: "us-east-1a", + MultiGatewayImage: "gateway:latest", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(1)), + }, + AllCells: []multigresv1alpha1.CellName{"zone-a"}, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRef{ + Address: clusterName + "-global-topo-client." + namespace + ".svc:2379", + RootPath: "/multigres/global", + Implementation: "etcd2", + }, + TopologyReconciliation: multigresv1alpha1.TopologyReconciliation{ + RegisterCell: true, + PrunePoolers: true, + }, + }, + }, + // 4. TableGroup + &multigresv1alpha1.TableGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-db1-tg1", + Namespace: namespace, + Labels: map[string]string{ + "multigres.com/cluster": clusterName, + "multigres.com/database": "db1", + "multigres.com/tablegroup": "tg1", + }, + OwnerReferences: clusterOwnerRefs(t, clusterName), + }, + Spec: multigresv1alpha1.TableGroupSpec{ + DatabaseName: "db1", + TableGroupName: "tg1", + Images: multigresv1alpha1.ShardImages{ + MultiOrch: "orch:latest", + MultiPooler: "pooler:latest", + Postgres: "postgres:15", + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRef{ + Address: clusterName + "-global-topo-client." + namespace + ".svc:2379", + RootPath: "/multigres/global", + Implementation: "etcd2", + }, + Shards: []multigresv1alpha1.ShardResolvedSpec{ + { + Name: "s1", + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + // Controller logic defaults cells if empty + Cells: []multigresv1alpha1.CellName{"zone-a"}, + StatelessSpec: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "primary": { + ReplicasPerCell: ptr.To(int32(1)), + Type: "readWrite", + Cells: []multigresv1alpha1.CellName{"zone-a"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + ctx := t.Context() + + // 1. Setup Envtest and Manager + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + // 2. Setup Watcher for all expected resources + // We explicitly watch the child resources we expect to be created. + watcher := testutil.NewResourceWatcher(t, ctx, mgr, + testutil.WithCmpOpts( + testutil.IgnoreMetaRuntimeFields(), + testutil.IgnoreServiceRuntimeFields(), + testutil.IgnoreDeploymentRuntimeFields(), + testutil.IgnorePodSpecDefaults(), + testutil.IgnoreDeploymentSpecDefaults(), + ), + testutil.WithExtraResource( + &multigresv1alpha1.MultigresCluster{}, + &multigresv1alpha1.TopoServer{}, + &multigresv1alpha1.Cell{}, + &multigresv1alpha1.TableGroup{}, + ), + // Extend timeout as this is a "root" controller triggering other things + testutil.WithTimeout(10*time.Second), + ) + k8sClient := mgr.GetClient() + + // 3. Setup and Start Controller + reconciler := &multigrescluster.MultigresClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + } + + // Pass SkipNameValidation via options to avoid controller name collisions in parallel tests + if err := reconciler.SetupWithManager(mgr, controller.Options{ + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } + + // 4. 
Create the Input
+			if err := k8sClient.Create(ctx, tc.cluster); err != nil {
+				t.Fatalf("Failed to create the initial cluster, %v", err)
+			}
+
+			// 5. Assert Logic: Wait for Children
+			// This ensures the controller has run and reconciled at least once successfully
+			if err := watcher.WaitForMatch(tc.wantResources...); err != nil {
+				t.Errorf("Resources mismatch:\n%v", err)
+			}
+
+			// 6. Verify Parent Finalizer (Manual Check)
+			// We check this manually to avoid fighting with status/spec diffs in the watcher
+			fetchedCluster := &multigresv1alpha1.MultigresCluster{}
+			if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(tc.cluster), fetchedCluster); err != nil {
+				t.Fatalf("Failed to get cluster: %v", err)
+			}
+
+			if !slices.Contains(fetchedCluster.Finalizers, "multigres.com/finalizer") {
+				t.Errorf("Expected finalizer 'multigres.com/finalizer' to be present, got %v", fetchedCluster.Finalizers)
+			}
+		})
+	}
+}
+
+// Helpers
+
+func clusterLabels(t testing.TB, clusterName, app, cell string) map[string]string {
+	t.Helper()
+	l := map[string]string{
+		"multigres.com/cluster": clusterName,
+	}
+	if app != "" {
+		l["app"] = app
+	}
+	if cell != "" {
+		l["multigres.com/cell"] = cell
+	}
+	return l
+}
+
+func clusterOwnerRefs(t testing.TB, clusterName string) []metav1.OwnerReference {
+	t.Helper()
+	return []metav1.OwnerReference{{
+		APIVersion:         "multigres.com/v1alpha1",
+		Kind:               "MultigresCluster",
+		Name:               clusterName,
+		Controller:         ptr.To(true),
+		BlockOwnerDeletion: ptr.To(true),
+	}}
+}
diff --git a/pkg/cluster-handler/controller/multigrescluster/multigrescluster_controller.go b/pkg/cluster-handler/controller/multigrescluster/multigrescluster_controller.go
new file mode 100644
index 00000000..fbc045ae
--- /dev/null
+++ b/pkg/cluster-handler/controller/multigrescluster/multigrescluster_controller.go
@@ -0,0 +1,582 @@
+package multigrescluster
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1"
+)
+
+const (
+	finalizerName = "multigres.com/finalizer"
+)
+
+// MultigresClusterReconciler reconciles a MultigresCluster object.
+type MultigresClusterReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for a MultigresCluster object and makes changes based on the state read
+// and what is in the MultigresCluster.Spec.
+// +// +kubebuilder:rbac:groups=multigres.com,resources=multigresclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=multigres.com,resources=multigresclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=multigres.com,resources=multigresclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=multigres.com,resources=coretemplates;celltemplates;shardtemplates,verbs=get;list;watch +// +kubebuilder:rbac:groups=multigres.com,resources=cells;tablegroups;toposervers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +func (r *MultigresClusterReconciler) Reconcile( + ctx context.Context, + req ctrl.Request, +) (ctrl.Result, error) { + l := log.FromContext(ctx) + + cluster := &multigresv1alpha1.MultigresCluster{} + err := r.Get(ctx, req.NamespacedName, cluster) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, fmt.Errorf("failed to get MultigresCluster: %w", err) + } + + if !cluster.DeletionTimestamp.IsZero() { + return r.handleDelete(ctx, cluster) + } + + if !controllerutil.ContainsFinalizer(cluster, finalizerName) { + controllerutil.AddFinalizer(cluster, finalizerName) + if err := r.Update(ctx, cluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to add finalizer: %w", err) + } + return ctrl.Result{}, nil + } + + resolver := &TemplateResolver{ + Client: r.Client, + Namespace: cluster.Namespace, + Defaults: cluster.Spec.TemplateDefaults, + } + + if err := r.reconcileGlobalComponents(ctx, cluster, resolver); err != nil { + l.Error(err, "Failed to reconcile global components") + return ctrl.Result{}, err + } + + if err := r.reconcileCells(ctx, cluster, resolver); err != nil { + l.Error(err, "Failed to reconcile cells") + return ctrl.Result{}, err + } + + if err := r.reconcileDatabases(ctx, cluster, resolver); err != nil { + l.Error(err, "Failed to reconcile databases") + return ctrl.Result{}, err + } + + if err := r.updateStatus(ctx, cluster); err != nil { + l.Error(err, "Failed to update status") + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil +} + +func (r *MultigresClusterReconciler) handleDelete( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, +) (ctrl.Result, error) { + if controllerutil.ContainsFinalizer(cluster, finalizerName) { + if err := r.checkChildrenDeleted(ctx, cluster); err != nil { + return ctrl.Result{}, err + } + controllerutil.RemoveFinalizer(cluster, finalizerName) + if err := r.Update(ctx, cluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to remove finalizer: %w", err) + } + } + return ctrl.Result{}, nil +} + +func (r *MultigresClusterReconciler) checkChildrenDeleted( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, +) error { + cells := &multigresv1alpha1.CellList{} + if err := r.List(ctx, cells, client.InNamespace(cluster.Namespace), client.MatchingLabels{"multigres.com/cluster": cluster.Name}); err != nil { + return fmt.Errorf("failed to list cells: %w", err) + } + if len(cells.Items) > 0 { + return fmt.Errorf("cells still exist") + } + + tgs := &multigresv1alpha1.TableGroupList{} + if err := r.List(ctx, tgs, client.InNamespace(cluster.Namespace), client.MatchingLabels{"multigres.com/cluster": cluster.Name}); err != nil { + return fmt.Errorf("failed to list tablegroups: %w", err) + } + if len(tgs.Items) > 0 { + return fmt.Errorf("tablegroups still exist") + 
} + + ts := &multigresv1alpha1.TopoServerList{} + if err := r.List(ctx, ts, client.InNamespace(cluster.Namespace), client.MatchingLabels{"multigres.com/cluster": cluster.Name}); err != nil { + return fmt.Errorf("failed to list toposervers: %w", err) + } + if len(ts.Items) > 0 { + return fmt.Errorf("toposervers still exist") + } + + return nil +} + +func (r *MultigresClusterReconciler) reconcileGlobalComponents( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, + resolver *TemplateResolver, +) error { + if err := r.reconcileGlobalTopoServer(ctx, cluster, resolver); err != nil { + return err + } + if err := r.reconcileMultiAdmin(ctx, cluster, resolver); err != nil { + return err + } + return nil +} + +func (r *MultigresClusterReconciler) reconcileGlobalTopoServer( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, + resolver *TemplateResolver, +) error { + tplName := cluster.Spec.TemplateDefaults.CoreTemplate + if cluster.Spec.GlobalTopoServer.TemplateRef != "" { + tplName = cluster.Spec.GlobalTopoServer.TemplateRef + } + + tpl, err := resolver.ResolveCoreTemplate(ctx, tplName) + if err != nil { + return fmt.Errorf("failed to resolve topo template: %w", err) + } + + spec := ResolveGlobalTopo(&cluster.Spec.GlobalTopoServer, tpl) + if spec.Etcd != nil { + ts := &multigresv1alpha1.TopoServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-global-topo", + Namespace: cluster.Namespace, + Labels: map[string]string{"multigres.com/cluster": cluster.Name}, + }, + } + if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, ts, func() error { + replicas := DefaultEtcdReplicas + if spec.Etcd.Replicas != nil { + replicas = *spec.Etcd.Replicas + } + + ts.Spec.Etcd = &multigresv1alpha1.EtcdSpec{ + Image: spec.Etcd.Image, + Replicas: &replicas, + Storage: spec.Etcd.Storage, + Resources: spec.Etcd.Resources, + } + return controllerutil.SetControllerReference(cluster, ts, r.Scheme) + }); err != nil { + return fmt.Errorf("failed to create/update global topo: %w", err) + } + } + return nil +} + +func (r *MultigresClusterReconciler) reconcileMultiAdmin( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, + resolver *TemplateResolver, +) error { + tplName := cluster.Spec.TemplateDefaults.CoreTemplate + if cluster.Spec.MultiAdmin.TemplateRef != "" { + tplName = cluster.Spec.MultiAdmin.TemplateRef + } + + tpl, err := resolver.ResolveCoreTemplate(ctx, tplName) + if err != nil { + return fmt.Errorf("failed to resolve admin template: %w", err) + } + + spec := ResolveMultiAdmin(&cluster.Spec.MultiAdmin, tpl) + if spec != nil { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-multiadmin", + Namespace: cluster.Namespace, + Labels: map[string]string{ + "multigres.com/cluster": cluster.Name, + "app": "multiadmin", + }, + }, + } + if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, deploy, func() error { + replicas := DefaultAdminReplicas + if spec.Replicas != nil { + replicas = *spec.Replicas + } + deploy.Spec.Replicas = &replicas + deploy.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "multiadmin", "multigres.com/cluster": cluster.Name}, + } + deploy.Spec.Template = corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "multiadmin", "multigres.com/cluster": cluster.Name}, + }, + Spec: corev1.PodSpec{ + ImagePullSecrets: cluster.Spec.Images.ImagePullSecrets, + Containers: []corev1.Container{ + { + Name: "multiadmin", + Image: 
cluster.Spec.Images.MultiAdmin, + Resources: spec.Resources, + }, + }, + Affinity: spec.Affinity, + }, + } + return controllerutil.SetControllerReference(cluster, deploy, r.Scheme) + }); err != nil { + return fmt.Errorf("failed to create/update multiadmin: %w", err) + } + } + return nil +} + +func (r *MultigresClusterReconciler) reconcileCells( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, + resolver *TemplateResolver, +) error { + existingCells := &multigresv1alpha1.CellList{} + if err := r.List(ctx, existingCells, client.InNamespace(cluster.Namespace), client.MatchingLabels{"multigres.com/cluster": cluster.Name}); err != nil { + return fmt.Errorf("failed to list existing cells: %w", err) + } + + globalTopoRef, err := r.getGlobalTopoRef(ctx, cluster, resolver) + if err != nil { + return fmt.Errorf("failed to get global topo ref: %w", err) + } + + activeCellNames := make(map[string]bool, len(cluster.Spec.Cells)) + + allCellNames := []multigresv1alpha1.CellName{} + for _, cellCfg := range cluster.Spec.Cells { + allCellNames = append(allCellNames, multigresv1alpha1.CellName(cellCfg.Name)) + } + + for _, cellCfg := range cluster.Spec.Cells { + activeCellNames[cellCfg.Name] = true + + tpl, err := resolver.ResolveCellTemplate(ctx, cellCfg.CellTemplate) + if err != nil { + return fmt.Errorf("failed to resolve cell template '%s': %w", cellCfg.CellTemplate, err) + } + + gatewaySpec, localTopoSpec := MergeCellConfig(tpl, cellCfg.Overrides, cellCfg.Spec) + + cellCR := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-" + cellCfg.Name, + Namespace: cluster.Namespace, + Labels: map[string]string{ + "multigres.com/cluster": cluster.Name, + "multigres.com/cell": cellCfg.Name, + }, + }, + } + + if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, cellCR, func() error { + cellCR.Spec.Name = cellCfg.Name + cellCR.Spec.Zone = cellCfg.Zone + cellCR.Spec.Region = cellCfg.Region + cellCR.Spec.MultiGatewayImage = cluster.Spec.Images.MultiGateway + cellCR.Spec.MultiGateway = gatewaySpec + cellCR.Spec.AllCells = allCellNames + + cellCR.Spec.GlobalTopoServer = globalTopoRef + + cellCR.Spec.TopoServer = localTopoSpec + + cellCR.Spec.TopologyReconciliation = multigresv1alpha1.TopologyReconciliation{ + RegisterCell: true, + PrunePoolers: true, + } + + return controllerutil.SetControllerReference(cluster, cellCR, r.Scheme) + }); err != nil { + return fmt.Errorf("failed to create/update cell '%s': %w", cellCfg.Name, err) + } + } + + for _, item := range existingCells.Items { + if !activeCellNames[item.Spec.Name] { + if err := r.Delete(ctx, &item); err != nil { + return fmt.Errorf("failed to delete orphaned cell '%s': %w", item.Name, err) + } + } + } + + return nil +} + +func (r *MultigresClusterReconciler) reconcileDatabases( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, + resolver *TemplateResolver, +) error { + existingTGs := &multigresv1alpha1.TableGroupList{} + if err := r.List(ctx, existingTGs, client.InNamespace(cluster.Namespace), client.MatchingLabels{"multigres.com/cluster": cluster.Name}); err != nil { + return fmt.Errorf("failed to list existing tablegroups: %w", err) + } + + globalTopoRef, err := r.getGlobalTopoRef(ctx, cluster, resolver) + if err != nil { + return fmt.Errorf("failed to get global topo ref: %w", err) + } + + activeTGNames := make(map[string]bool) + + for _, db := range cluster.Spec.Databases { + for _, tg := range db.TableGroups { + tgNameFull := fmt.Sprintf("%s-%s-%s", cluster.Name, db.Name, tg.Name) + if 
len(tgNameFull) > 50 { + return fmt.Errorf( + "TableGroup name '%s' exceeds 50 characters; limit required to allow for shard resource suffixing", + tgNameFull, + ) + } + + activeTGNames[tgNameFull] = true + + resolvedShards := []multigresv1alpha1.ShardResolvedSpec{} + + for _, shard := range tg.Shards { + tpl, err := resolver.ResolveShardTemplate(ctx, shard.ShardTemplate) + if err != nil { + return fmt.Errorf( + "failed to resolve shard template '%s': %w", + shard.ShardTemplate, + err, + ) + } + + orch, pools := MergeShardConfig(tpl, shard.Overrides, shard.Spec) + + // Default MultiOrch Cells if empty (Consensus safety) + // If 'cells' is empty, it defaults to all cells where pools are defined. + if len(orch.Cells) == 0 { + uniqueCells := make(map[string]bool) + for _, pool := range pools { + for _, cell := range pool.Cells { + uniqueCells[string(cell)] = true + } + } + for c := range uniqueCells { + orch.Cells = append(orch.Cells, multigresv1alpha1.CellName(c)) + } + // Sort for deterministic output + sort.Slice(orch.Cells, func(i, j int) bool { + return string(orch.Cells[i]) < string(orch.Cells[j]) + }) + } + + resolvedShards = append(resolvedShards, multigresv1alpha1.ShardResolvedSpec{ + Name: shard.Name, + MultiOrch: orch, + Pools: pools, + }) + } + + tgCR := &multigresv1alpha1.TableGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: tgNameFull, + Namespace: cluster.Namespace, + Labels: map[string]string{ + "multigres.com/cluster": cluster.Name, + "multigres.com/database": db.Name, + "multigres.com/tablegroup": tg.Name, + }, + }, + } + + if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, tgCR, func() error { + tgCR.Spec.DatabaseName = db.Name + tgCR.Spec.TableGroupName = tg.Name + tgCR.Spec.IsDefault = tg.Default + tgCR.Spec.Images = multigresv1alpha1.ShardImages{ + MultiOrch: cluster.Spec.Images.MultiOrch, + MultiPooler: cluster.Spec.Images.MultiPooler, + Postgres: cluster.Spec.Images.Postgres, + } + tgCR.Spec.GlobalTopoServer = globalTopoRef + tgCR.Spec.Shards = resolvedShards + + return controllerutil.SetControllerReference(cluster, tgCR, r.Scheme) + }); err != nil { + return fmt.Errorf("failed to create/update tablegroup '%s': %w", tgNameFull, err) + } + } + } + + for _, item := range existingTGs.Items { + if !activeTGNames[item.Name] { + if err := r.Delete(ctx, &item); err != nil { + return fmt.Errorf("failed to delete orphaned tablegroup '%s': %w", item.Name, err) + } + } + } + + return nil +} + +func (r *MultigresClusterReconciler) getGlobalTopoRef( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, + resolver *TemplateResolver, +) (multigresv1alpha1.GlobalTopoServerRef, error) { + topoTplName := cluster.Spec.TemplateDefaults.CoreTemplate + if cluster.Spec.GlobalTopoServer.TemplateRef != "" { + topoTplName = cluster.Spec.GlobalTopoServer.TemplateRef + } + + topoTpl, err := resolver.ResolveCoreTemplate(ctx, topoTplName) + if err != nil { + return multigresv1alpha1.GlobalTopoServerRef{}, fmt.Errorf( + "failed to resolve global topo template: %w", + err, + ) + } + + topoSpec := ResolveGlobalTopo(&cluster.Spec.GlobalTopoServer, topoTpl) + + address := "" + if topoSpec.Etcd != nil { + address = fmt.Sprintf("%s-global-topo-client.%s.svc:2379", cluster.Name, cluster.Namespace) + } else if topoSpec.External != nil && len(topoSpec.External.Endpoints) > 0 { + address = string(topoSpec.External.Endpoints[0]) + } + + return multigresv1alpha1.GlobalTopoServerRef{ + Address: address, + RootPath: "/multigres/global", + Implementation: "etcd2", + }, nil +} + +func (r 
*MultigresClusterReconciler) updateStatus( + ctx context.Context, + cluster *multigresv1alpha1.MultigresCluster, +) error { + cluster.Status.ObservedGeneration = cluster.Generation + cluster.Status.Cells = make(map[string]multigresv1alpha1.CellStatusSummary) + cluster.Status.Databases = make(map[string]multigresv1alpha1.DatabaseStatusSummary) + + cells := &multigresv1alpha1.CellList{} + if err := r.List(ctx, cells, client.InNamespace(cluster.Namespace), client.MatchingLabels{"multigres.com/cluster": cluster.Name}); err != nil { + return fmt.Errorf("failed to list cells for status: %w", err) + } + + for _, c := range cells.Items { + ready := false + for _, cond := range c.Status.Conditions { + if cond.Type == "Available" && cond.Status == "True" { + ready = true + break + } + } + cluster.Status.Cells[c.Spec.Name] = multigresv1alpha1.CellStatusSummary{ + Ready: ready, + GatewayReplicas: c.Status.GatewayReplicas, + } + } + + tgs := &multigresv1alpha1.TableGroupList{} + if err := r.List(ctx, tgs, client.InNamespace(cluster.Namespace), client.MatchingLabels{"multigres.com/cluster": cluster.Name}); err != nil { + return fmt.Errorf("failed to list tablegroups for status: %w", err) + } + + dbShards := make(map[string]struct { + Ready int32 + Total int32 + }) + + for _, tg := range tgs.Items { + stat := dbShards[tg.Spec.DatabaseName] + stat.Ready += tg.Status.ReadyShards + stat.Total += tg.Status.TotalShards + dbShards[tg.Spec.DatabaseName] = stat + } + + for dbName, stat := range dbShards { + cluster.Status.Databases[dbName] = multigresv1alpha1.DatabaseStatusSummary{ + ReadyShards: stat.Ready, + TotalShards: stat.Total, + } + } + + allCellsReady := true + for _, c := range cluster.Status.Cells { + if !c.Ready { + allCellsReady = false + break + } + } + + statusStr := metav1.ConditionFalse + if allCellsReady && len(cluster.Status.Cells) > 0 { + statusStr = metav1.ConditionTrue + } + + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: "Available", + Status: statusStr, + Reason: "AggregatedStatus", + Message: "Aggregation of cell availability", + LastTransitionTime: metav1.Now(), + }) + + if err := r.Status().Update(ctx, cluster); err != nil { + return fmt.Errorf("failed to update cluster status: %w", err) + } + + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *MultigresClusterReconciler) SetupWithManager( + mgr ctrl.Manager, + opts ...controller.Options, +) error { + controllerOpts := controller.Options{} + if len(opts) > 0 { + controllerOpts = opts[0] + } + + return ctrl.NewControllerManagedBy(mgr). + For(&multigresv1alpha1.MultigresCluster{}). + Owns(&multigresv1alpha1.Cell{}). + Owns(&multigresv1alpha1.TableGroup{}). + Owns(&multigresv1alpha1.TopoServer{}). + Owns(&appsv1.Deployment{}). + WithOptions(controllerOpts). 
+ Complete(r) +} diff --git a/pkg/cluster-handler/controller/multigrescluster/multigrescluster_controller_test.go b/pkg/cluster-handler/controller/multigrescluster/multigrescluster_controller_test.go new file mode 100644 index 00000000..f7215911 --- /dev/null +++ b/pkg/cluster-handler/controller/multigrescluster/multigrescluster_controller_test.go @@ -0,0 +1,1370 @@ +package multigrescluster + +import ( + "errors" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +// setupFixtures helper returns a fresh set of test objects to ensure isolation between test functions. +func setupFixtures(tb testing.TB) ( + *multigresv1alpha1.CoreTemplate, + *multigresv1alpha1.CellTemplate, + *multigresv1alpha1.ShardTemplate, + *multigresv1alpha1.MultigresCluster, + string, string, string, +) { + tb.Helper() + + clusterName := "test-cluster" + namespace := "default" + finalizerName := "multigres.com/finalizer" + + coreTpl := &multigresv1alpha1.CoreTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "default-core", Namespace: namespace}, + Spec: multigresv1alpha1.CoreTemplateSpec{ + GlobalTopoServer: &multigresv1alpha1.TopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{ + Image: "etcd:v1", + Replicas: ptr.To(int32(3)), + }, + }, + MultiAdmin: &multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(1)), + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: parseQty("100m")}, + }, + }, + }, + } + + cellTpl := &multigresv1alpha1.CellTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "default-cell", Namespace: namespace}, + Spec: multigresv1alpha1.CellTemplateSpec{ + MultiGateway: &multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + }, + }, + } + + shardTpl := &multigresv1alpha1.ShardTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "default-shard", Namespace: namespace}, + Spec: multigresv1alpha1.ShardTemplateSpec{ + MultiOrch: &multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(3)), + }, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "primary": { + ReplicasPerCell: ptr.To(int32(2)), + Type: "readWrite", + }, + }, + }, + } + + baseCluster := &multigresv1alpha1.MultigresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + Finalizers: []string{finalizerName}, + }, + Spec: multigresv1alpha1.MultigresClusterSpec{ + Images: multigresv1alpha1.ClusterImages{ + MultiGateway: "gateway:latest", + MultiOrch: "orch:latest", + MultiPooler: "pooler:latest", + MultiAdmin: "admin:latest", + Postgres: "postgres:15", + }, + TemplateDefaults: multigresv1alpha1.TemplateDefaults{ + CoreTemplate: "default-core", + CellTemplate: "default-cell", + ShardTemplate: "default-shard", + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerSpec{ 
+ TemplateRef: "default-core", + }, + MultiAdmin: multigresv1alpha1.MultiAdminConfig{ + TemplateRef: "default-core", + }, + Cells: []multigresv1alpha1.CellConfig{ + {Name: "zone-a", Zone: "us-east-1a"}, + }, + Databases: []multigresv1alpha1.DatabaseConfig{ + { + Name: "db1", + TableGroups: []multigresv1alpha1.TableGroupConfig{ + {Name: "tg1", Shards: []multigresv1alpha1.ShardConfig{{Name: "s1"}}}, + }, + }, + }, + }, + } + + return coreTpl, cellTpl, shardTpl, baseCluster, clusterName, namespace, finalizerName +} + +func TestMultigresClusterReconciler_Reconcile_Success(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + coreTpl, cellTpl, shardTpl, baseCluster, clusterName, namespace, finalizerName := setupFixtures( + t, + ) + + tests := map[string]struct { + multigrescluster *multigresv1alpha1.MultigresCluster + existingObjects []client.Object + preReconcileUpdate func(testing.TB, *multigresv1alpha1.MultigresCluster) + skipClusterCreation bool + validate func(testing.TB, client.Client) + }{ + "Create: Adds Finalizer": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Finalizers = nil // Explicitly remove finalizer to test addition + }, + existingObjects: []client.Object{}, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + updatedCluster := &multigresv1alpha1.MultigresCluster{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, updatedCluster); err != nil { + t.Fatalf("failed to get updated cluster: %v", err) + } + if !controllerutil.ContainsFinalizer(updatedCluster, finalizerName) { + t.Error("Finalizer was not added to Cluster") + } + }, + }, + "Create: Full Cluster Creation with Templates": { + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + updatedCluster := &multigresv1alpha1.MultigresCluster{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, updatedCluster); err != nil { + t.Fatal(err) + } + if !controllerutil.ContainsFinalizer(updatedCluster, finalizerName) { + t.Error("Finalizer was not added to Cluster") + } + + // Verify Wiring + cell := &multigresv1alpha1.Cell{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-zone-a", Namespace: namespace}, cell); err != nil { + t.Fatal("Expected Cell 'zone-a' to exist") + } + + expectedAddr := clusterName + "-global-topo-client." + namespace + ".svc:2379" + if got, want := cell.Spec.GlobalTopoServer.Address, expectedAddr; got != want { + t.Errorf("Wiring Bug! 
Cell has wrong Topo Address got %q, want %q", got, want) + } + }, + }, + "Create: Independent Templates (Topo vs Admin)": { + // Using preReconcileUpdate instead of an explicit object to leverage defaults + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.CoreTemplate = "" // clear default + c.Spec.GlobalTopoServer.TemplateRef = "topo-core" + c.Spec.MultiAdmin.TemplateRef = "admin-core" + }, + existingObjects: []client.Object{ + cellTpl, shardTpl, + &multigresv1alpha1.CoreTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "topo-core", Namespace: namespace}, + Spec: multigresv1alpha1.CoreTemplateSpec{ + GlobalTopoServer: &multigresv1alpha1.TopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "etcd:topo"}, + }, + }, + }, + &multigresv1alpha1.CoreTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "admin-core", Namespace: namespace}, + Spec: multigresv1alpha1.CoreTemplateSpec{ + MultiAdmin: &multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(5))}, + }, + }, + }, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + + // Check Topo uses topo-core + ts := &multigresv1alpha1.TopoServer{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-global-topo", Namespace: namespace}, ts); err != nil { + t.Fatal(err) + } + if got, want := ts.Spec.Etcd.Image, "etcd:topo"; got != want { + t.Errorf("TopoServer image mismatch got %q, want %q", got, want) + } + + // Check Admin uses admin-core + deploy := &appsv1.Deployment{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-multiadmin", Namespace: namespace}, deploy); err != nil { + t.Fatal(err) + } + if got, want := *deploy.Spec.Replicas, int32(5); got != want { + t.Errorf("MultiAdmin replicas mismatch got %d, want %d", got, want) + } + + // Verify Wiring for independent template + cell := &multigresv1alpha1.Cell{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-zone-a", Namespace: namespace}, cell); err != nil { + t.Fatal("Expected Cell 'zone-a' to exist") + } + expectedAddr := clusterName + "-global-topo-client." + namespace + ".svc:2379" + if got, want := cell.Spec.GlobalTopoServer.Address, expectedAddr; got != want { + t.Errorf( + "Wiring Bug (Independent)! 
Cell has wrong Topo Address got %q, want %q", + got, + want, + ) + } + }, + }, + "Create: MultiAdmin TemplateRef Only": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.GlobalTopoServer = multigresv1alpha1.GlobalTopoServerSpec{ + External: &multigresv1alpha1.ExternalTopoServerSpec{ + Endpoints: []multigresv1alpha1.EndpointUrl{"http://ext:2379"}, + }, + } + c.Spec.MultiAdmin = multigresv1alpha1.MultiAdminConfig{TemplateRef: "default-core"} + c.Spec.TemplateDefaults.CoreTemplate = "" + }, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + deploy := &appsv1.Deployment{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-multiadmin", Namespace: namespace}, deploy); err != nil { + t.Fatal("MultiAdmin not created") + } + }, + }, + "Create: MultiOrch Placement Defaulting": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.ShardTemplate = "" // Ensure no default template interferes + // Define a database with explicit pools containing cells + c.Spec.Databases = []multigresv1alpha1.DatabaseConfig{ + { + Name: "db-defaulting", + TableGroups: []multigresv1alpha1.TableGroupConfig{ + { + Name: "tg1", + Shards: []multigresv1alpha1.ShardConfig{ + { + Name: "0", + Spec: &multigresv1alpha1.ShardInlineSpec{ + // MultiOrch Cells explicitly EMPTY + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(1)), + }, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "pool-a": { + Cells: []multigresv1alpha1.CellName{"zone-a"}, + }, + "pool-b": { + Cells: []multigresv1alpha1.CellName{"zone-b"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + }, + existingObjects: []client.Object{coreTpl, cellTpl}, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + // Fetch the child TableGroup (which holds the resolved shard spec) + tgName := clusterName + "-db-defaulting-tg1" + tg := &multigresv1alpha1.TableGroup{} + if err := c.Get(ctx, types.NamespacedName{Name: tgName, Namespace: namespace}, tg); err != nil { + t.Fatal(err) + } + + if got, want := len(tg.Spec.Shards), 1; got != want { + t.Fatalf("Shard count mismatch got %d, want %d", got, want) + } + + orchCells := tg.Spec.Shards[0].MultiOrch.Cells + wantCells := []multigresv1alpha1.CellName{"zone-a", "zone-b"} + if diff := cmp.Diff(wantCells, orchCells); diff != "" { + t.Errorf("MultiOrch cells mismatch (-want +got):\n%s", diff) + } + }, + }, + "Create: MultiAdmin with ImagePullSecrets": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.Images.ImagePullSecrets = []corev1.LocalObjectReference{{Name: "my-secret"}} + }, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + deploy := &appsv1.Deployment{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-multiadmin", Namespace: namespace}, deploy); err != nil { + t.Fatal("MultiAdmin not created") + } + want := []corev1.LocalObjectReference{{Name: "my-secret"}} + if diff := cmp.Diff(want, deploy.Spec.Template.Spec.ImagePullSecrets); diff != "" { + t.Errorf("ImagePullSecrets mismatch (-want +got):\n%s", diff) + } + }, + }, + "Create: Inline Etcd": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + // Use inline Etcd for GlobalTopoServer to test getGlobalTopoRef branch + c.Spec.GlobalTopoServer = multigresv1alpha1.GlobalTopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "etcd:inline"}, + } 
+ c.Spec.TemplateDefaults.CoreTemplate = "" + }, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + ts := &multigresv1alpha1.TopoServer{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-global-topo", Namespace: namespace}, ts); err != nil { + t.Fatal("Global TopoServer not created") + } + if got, want := ts.Spec.Etcd.Image, "etcd:inline"; got != want { + t.Errorf("TopoServer image mismatch got %q, want %q", got, want) + } + }, + }, + "Create: Defaults and Optional Components": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.CoreTemplate = "minimal-core" + c.Spec.GlobalTopoServer = multigresv1alpha1.GlobalTopoServerSpec{} // Use defaults + // Remove MultiAdmin to test skip logic + c.Spec.MultiAdmin = multigresv1alpha1.MultiAdminConfig{} + }, + existingObjects: []client.Object{ + &multigresv1alpha1.CoreTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "minimal-core", Namespace: namespace}, + Spec: multigresv1alpha1.CoreTemplateSpec{ + GlobalTopoServer: &multigresv1alpha1.TopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "etcd:v1"}, // Replicas nil + }, + }, + }, + cellTpl, shardTpl, + }, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + // Verify TopoServer created with default replicas (3) + ts := &multigresv1alpha1.TopoServer{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-global-topo", Namespace: namespace}, ts); err != nil { + t.Fatal("Global TopoServer not created") + } + if got, want := *ts.Spec.Etcd.Replicas, DefaultEtcdReplicas; got != want { + t.Errorf("Expected default replicas mismatch got %d, want %d", got, want) + } + // Verify MultiAdmin NOT created + deploy := &appsv1.Deployment{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-multiadmin", Namespace: namespace}, deploy); !apierrors.IsNotFound( + err, + ) { + t.Error("MultiAdmin should not have been created") + } + }, + }, + "Create: Cell with Local Topo in Template": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.Cells[0].CellTemplate = "local-topo-cell" + }, + existingObjects: []client.Object{ + coreTpl, shardTpl, + &multigresv1alpha1.CellTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "local-topo-cell", Namespace: namespace}, + Spec: multigresv1alpha1.CellTemplateSpec{ + MultiGateway: &multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + LocalTopoServer: &multigresv1alpha1.LocalTopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "local-etcd:v1"}, + }, + }, + }, + }, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + cell := &multigresv1alpha1.Cell{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-zone-a", Namespace: namespace}, cell); err != nil { + t.Fatal(err) + } + // Updated to handle pointer dereference safety + if cell.Spec.TopoServer == nil || cell.Spec.TopoServer.Etcd == nil || + cell.Spec.TopoServer.Etcd.Image != "local-etcd:v1" { + t.Error("LocalTopoServer not propagated to Cell") + } + }, + }, + "Create: External Topo with Empty Endpoints": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.CoreTemplate = "" + c.Spec.GlobalTopoServer = multigresv1alpha1.GlobalTopoServerSpec{ + External: &multigresv1alpha1.ExternalTopoServerSpec{ + Endpoints: []multigresv1alpha1.EndpointUrl{}, + }, + } + }, + existingObjects: []client.Object{coreTpl, cellTpl, shardTpl}, + validate: func(t testing.TB, c 
client.Client) { + ctx := t.Context() + cell := &multigresv1alpha1.Cell{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-zone-a", Namespace: namespace}, cell); err != nil { + t.Fatal(err) + } + if got, want := cell.Spec.GlobalTopoServer.Address, ""; got != want { + t.Errorf("Address mismatch got %q, want %q", got, want) + } + }, + }, + "Create: Inline Specs and Missing Templates": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.GlobalTopoServer = multigresv1alpha1.GlobalTopoServerSpec{ + External: &multigresv1alpha1.ExternalTopoServerSpec{ + Endpoints: []multigresv1alpha1.EndpointUrl{"http://ext:2379"}, + }, + } + c.Spec.MultiAdmin = multigresv1alpha1.MultiAdminConfig{ + Spec: &multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(5))}, + } + c.Spec.Cells[0].Spec = &multigresv1alpha1.CellInlineSpec{ + MultiGateway: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(4))}, + } + c.Spec.Databases[0].TableGroups[0].Shards[0].Spec = &multigresv1alpha1.ShardInlineSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(3))}, + }, + } + c.Spec.TemplateDefaults = multigresv1alpha1.TemplateDefaults{} + }, + existingObjects: []client.Object{}, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + cell := &multigresv1alpha1.Cell{} + if err := c.Get(ctx, types.NamespacedName{Name: clusterName + "-zone-a", Namespace: namespace}, cell); err != nil { + t.Fatal(err) + } + if got, want := *cell.Spec.MultiGateway.Replicas, int32(4); got != want { + t.Errorf("Cell inline spec ignored got %d, want %d", got, want) + } + }, + }, + "Status: Aggregation Logic": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.Databases = append( + c.Spec.Databases, + multigresv1alpha1.DatabaseConfig{ + Name: "db2", + TableGroups: []multigresv1alpha1.TableGroupConfig{}, + }, + ) + }, + // Here we insert the Cell WITH STATUS directly into existingObjects. + // The fake client will respect this state. 
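+			// (The fake client stores this seeded status as-is, so updateStatus
+			// observes an Available cell when aggregating the cluster condition.)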
+ existingObjects: []client.Object{ + coreTpl, cellTpl, shardTpl, + &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-zone-a", + Namespace: namespace, + Labels: map[string]string{"multigres.com/cluster": clusterName}, + }, + Spec: multigresv1alpha1.CellSpec{Name: "zone-a"}, + Status: multigresv1alpha1.CellStatus{ + Conditions: []metav1.Condition{ + {Type: "Available", Status: metav1.ConditionTrue}, + }, + }, + }, + }, + validate: func(t testing.TB, c client.Client) { + cluster := &multigresv1alpha1.MultigresCluster{} + if err := c.Get(t.Context(), types.NamespacedName{Name: clusterName, Namespace: namespace}, cluster); err != nil { + t.Fatalf("failed to get cluster: %v", err) + } + if !meta.IsStatusConditionTrue(cluster.Status.Conditions, "Available") { + t.Error("Cluster should be available") + } + }, + }, + "Delete: Allow Finalization if Children Gone": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{}, + validate: func(t testing.TB, c client.Client) { + updated := &multigresv1alpha1.MultigresCluster{} + err := c.Get( + t.Context(), + types.NamespacedName{Name: clusterName, Namespace: namespace}, + updated, + ) + if err == nil { + if controllerutil.ContainsFinalizer(updated, finalizerName) { + t.Error("Finalizer was not removed") + } + } + }, + }, + "Object Not Found (Clean Exit)": { + skipClusterCreation: true, + existingObjects: []client.Object{}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Default to all standard templates if existingObjects is nil + objects := tc.existingObjects + if objects == nil { + objects = []client.Object{coreTpl, cellTpl, shardTpl} + } + + clientBuilder := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + WithStatusSubresource(&multigresv1alpha1.MultigresCluster{}, &multigresv1alpha1.Cell{}, &multigresv1alpha1.TableGroup{}) + baseClient := clientBuilder.Build() + + finalClient := baseClient + + // Apply defaults if no specific cluster is provided + cluster := tc.multigrescluster + if cluster == nil { + cluster = baseCluster.DeepCopy() + } + + // Apply pre-reconcile updates if defined + if tc.preReconcileUpdate != nil { + tc.preReconcileUpdate(t, cluster) + } + + shouldDelete := cluster.GetDeletionTimestamp() != nil && + !cluster.GetDeletionTimestamp().IsZero() + + if !strings.Contains(name, "Object Not Found") { + check := &multigresv1alpha1.MultigresCluster{} + err := baseClient.Get( + t.Context(), + types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, + check, + ) + if apierrors.IsNotFound(err) { + if err := baseClient.Create(t.Context(), cluster); err != nil { + t.Fatalf("failed to create initial cluster: %v", err) + } + + // Ensure DeletionTimestamp is set in the API if the test requires it. + // client.Create strips this field, so we must invoke Delete() to re-apply it. + if shouldDelete { + // 1. Refresh object to avoid ResourceVersion conflict + if err := baseClient.Get(t.Context(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cluster); err != nil { + t.Fatalf("failed to refresh cluster before delete: %v", err) + } + // 2. Delete it + if err := baseClient.Delete(t.Context(), cluster); err != nil { + t.Fatalf("failed to set deletion timestamp: %v", err) + } + // 3. 
Refresh again to ensure the controller sees the deletion timestamp + if err := baseClient.Get(t.Context(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cluster); err != nil { + t.Fatalf("failed to refresh cluster after deletion: %v", err) + } + } + } + } + + reconciler := &MultigresClusterReconciler{ + Client: finalClient, + Scheme: scheme, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: cluster.Name, + Namespace: cluster.Namespace, + }, + } + + _, err := reconciler.Reconcile(t.Context(), req) + if err != nil { + t.Errorf("Unexpected error from Reconcile: %v", err) + } + + if tc.validate != nil { + tc.validate(t, baseClient) + } + }) + } +} + +func TestMultigresClusterReconciler_Reconcile_Failure(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + coreTpl, cellTpl, shardTpl, baseCluster, clusterName, namespace, finalizerName := setupFixtures( + t, + ) + errBoom := errors.New("boom") + + tests := map[string]struct { + multigrescluster *multigresv1alpha1.MultigresCluster + existingObjects []client.Object + failureConfig *testutil.FailureConfig + preReconcileUpdate func(testing.TB, *multigresv1alpha1.MultigresCluster) + skipClusterCreation bool + validate func(testing.TB, client.Client) + }{ + "Delete: Block Finalization if Cells Exist": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-zone-a", + Namespace: namespace, + Labels: map[string]string{"multigres.com/cluster": clusterName}, + }, + }, + }, + }, + "Delete: Block Finalization if TableGroups Exist": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{ + &multigresv1alpha1.TableGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-db1-tg1", + Namespace: namespace, + Labels: map[string]string{"multigres.com/cluster": clusterName}, + }, + }, + }, + }, + "Delete: Block Finalization if TopoServer Exists": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{ + &multigresv1alpha1.TopoServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-global-topo", + Namespace: namespace, + Labels: map[string]string{"multigres.com/cluster": clusterName}, + }, + }, + }, + }, + "Error: Explicit Template Missing (Should Fail)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.CoreTemplate = "non-existent-template" + }, + existingObjects: []client.Object{}, // No templates exist + failureConfig: nil, // No API failure, just logical failure + }, + "Error: Explicit Cell Template Missing": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.Cells[0].CellTemplate = "missing-cell-tpl" + }, + existingObjects: []client.Object{coreTpl, shardTpl}, // Missing cellTpl + }, + "Error: Explicit Shard Template Missing": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + 
c.Spec.Databases[0].TableGroups[0].Shards[0].ShardTemplate = "missing-shard-tpl" + }, + existingObjects: []client.Object{coreTpl, cellTpl}, // Missing shardTpl + }, + "Error: Fetch Cluster Failed": { + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName(clusterName, errBoom), + }, + }, + "Error: Add Finalizer Failed": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Finalizers = nil // Ensure we trigger the Add Finalizer path + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName(clusterName, errBoom), + }, + }, + "Error: Remove Finalizer Failed": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName(clusterName, errBoom), + }, + }, + "Error: CheckChildrenDeleted (List Cells Failed)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnList: func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.CellList); ok { + return errBoom + } + return nil + }, + }, + }, + "Error: CheckChildrenDeleted (List TableGroups Failed)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnList: func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.TableGroupList); ok { + return errBoom + } + return nil + }, + }, + }, + "Error: CheckChildrenDeleted (List TopoServers Failed)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + now := metav1.Now() + c.DeletionTimestamp = &now + c.Finalizers = []string{finalizerName} + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnList: func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.TopoServerList); ok { + return errBoom + } + return nil + }, + }, + }, + "Error: Resolve CoreTemplate Failed": { + existingObjects: []client.Object{coreTpl}, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("default-core", errBoom), + }, + }, + "Error: Resolve Admin Template Failed (Second Call)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.CoreTemplate = "" + c.Spec.GlobalTopoServer.TemplateRef = "topo-core" + c.Spec.MultiAdmin.TemplateRef = "admin-core-fail" + }, + existingObjects: []client.Object{ + cellTpl, shardTpl, + &multigresv1alpha1.CoreTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "topo-core", Namespace: namespace}, + // Minimal valid spec + Spec: multigresv1alpha1.CoreTemplateSpec{}, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("admin-core-fail", errBoom), + }, + }, + "Error: Create GlobalTopo Failed": { + failureConfig: &testutil.FailureConfig{ + OnCreate: testutil.FailOnObjectName(clusterName+"-global-topo", errBoom), + }, + }, + "Error: Create MultiAdmin Failed": { + failureConfig: 
&testutil.FailureConfig{ + OnCreate: testutil.FailOnObjectName(clusterName+"-multiadmin", errBoom), + }, + }, + "Error: Resolve CellTemplate Failed": { + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("default-cell", errBoom), + }, + }, + "Error: List Existing Cells Failed (Reconcile Loop)": { + failureConfig: &testutil.FailureConfig{ + OnList: func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.CellList); ok { + return errBoom + } + return nil + }, + }, + }, + "Error: Create Cell Failed": { + failureConfig: &testutil.FailureConfig{ + OnCreate: testutil.FailOnObjectName(clusterName+"-zone-a", errBoom), + }, + }, + "Error: Prune Cell Failed": { + existingObjects: []client.Object{ + coreTpl, cellTpl, shardTpl, + &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-zone-b", + Namespace: namespace, + Labels: map[string]string{"multigres.com/cluster": clusterName}, + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnDelete: testutil.FailOnObjectName(clusterName+"-zone-b", errBoom), + }, + }, + "Error: List Existing TableGroups Failed": { + failureConfig: &testutil.FailureConfig{ + OnList: func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.TableGroupList); ok { + return errBoom + } + return nil + }, + }, + }, + "Error: Resolve ShardTemplate Failed": { + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("default-shard", errBoom), + }, + }, + "Error: Create TableGroup Failed": { + failureConfig: &testutil.FailureConfig{ + OnCreate: testutil.FailOnObjectName(clusterName+"-db1-tg1", errBoom), + }, + }, + "Error: Prune TableGroup Failed": { + existingObjects: []client.Object{ + coreTpl, cellTpl, shardTpl, + &multigresv1alpha1.TableGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-orphan-tg", + Namespace: namespace, + Labels: map[string]string{"multigres.com/cluster": clusterName}, + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnDelete: testutil.FailOnObjectName(clusterName+"-orphan-tg", errBoom), + }, + }, + "Error: UpdateStatus (List Cells Failed)": { + failureConfig: &testutil.FailureConfig{ + OnList: func() func(client.ObjectList) error { + count := 0 + return func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.CellList); ok { + count++ + if count > 1 { + return errBoom + } + } + return nil + } + }(), + }, + }, + "Error: UpdateStatus (List TableGroups Failed)": { + failureConfig: &testutil.FailureConfig{ + OnList: func() func(client.ObjectList) error { + count := 0 + return func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.TableGroupList); ok { + count++ + if count > 1 { + return errBoom + } + } + return nil + } + }(), + }, + }, + "Error: Update Status Failed (API Error)": { + failureConfig: &testutil.FailureConfig{ + OnStatusUpdate: testutil.FailOnObjectName(clusterName, errBoom), + }, + }, + "Error: Global Topo Resolution Failed (During Cell Reconcile)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.CoreTemplate = "" + c.Spec.GlobalTopoServer.TemplateRef = "topo-fail-cells" + // Clear MultiAdmin to ensure predictable call counts + c.Spec.MultiAdmin = multigresv1alpha1.MultiAdminConfig{} + }, + existingObjects: []client.Object{ + cellTpl, shardTpl, + &multigresv1alpha1.CoreTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "topo-fail-cells", Namespace: namespace}, + Spec: multigresv1alpha1.CoreTemplateSpec{}, + }, + }, + 
failureConfig: &testutil.FailureConfig{ + OnGet: func() func(client.ObjectKey) error { + count := 0 + return func(key client.ObjectKey) error { + if key.Name == "topo-fail-cells" { + count++ + // Call 1: reconcileGlobalComponents -> ResolveCoreTemplate (Succeeds to proceed) + // Call 2: reconcileCells -> getGlobalTopoRef -> ResolveCoreTemplate (Fails) + if count == 2 { + return errBoom + } + } + return nil + } + }(), + }, + }, + "Error: Global Topo Resolution Failed (During Database Reconcile)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + c.Spec.TemplateDefaults.CoreTemplate = "" + c.Spec.GlobalTopoServer.TemplateRef = "topo-fail-db" + // Clear MultiAdmin to ensure predictable call counts + c.Spec.MultiAdmin = multigresv1alpha1.MultiAdminConfig{} + }, + existingObjects: []client.Object{ + cellTpl, shardTpl, + &multigresv1alpha1.CoreTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "topo-fail-db", Namespace: namespace}, + Spec: multigresv1alpha1.CoreTemplateSpec{}, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: func() func(client.ObjectKey) error { + count := 0 + return func(key client.ObjectKey) error { + if key.Name == "topo-fail-db" { + count++ + // Call 1: reconcileGlobalComponents (Succeeds) + // Call 2: reconcileCells (Succeeds) + // Call 3: reconcileDatabases -> getGlobalTopoRef (Fails) + if count == 3 { + return errBoom + } + } + return nil + } + }(), + }, + }, + "Create: Long Names (Truncation Check)": { + preReconcileUpdate: func(t testing.TB, c *multigresv1alpha1.MultigresCluster) { + longName := strings.Repeat("a", 50) + c.Spec.Databases[0].Name = longName + c.Spec.Databases[0].TableGroups[0].Name = longName + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Default to all standard templates if existingObjects is nil + objects := tc.existingObjects + if objects == nil { + objects = []client.Object{coreTpl, cellTpl, shardTpl} + } + + clientBuilder := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + WithStatusSubresource(&multigresv1alpha1.MultigresCluster{}, &multigresv1alpha1.Cell{}, &multigresv1alpha1.TableGroup{}) + baseClient := clientBuilder.Build() + + var finalClient client.Client + finalClient = client.Client(baseClient) + if tc.failureConfig != nil { + finalClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) + } + + // Apply defaults if no specific cluster is provided + cluster := tc.multigrescluster + if cluster == nil { + cluster = baseCluster.DeepCopy() + } + + // Apply pre-reconcile updates if defined + if tc.preReconcileUpdate != nil { + tc.preReconcileUpdate(t, cluster) + } + + shouldDelete := cluster.GetDeletionTimestamp() != nil && + !cluster.GetDeletionTimestamp().IsZero() + + if !strings.Contains(name, "Object Not Found") { + check := &multigresv1alpha1.MultigresCluster{} + err := baseClient.Get( + t.Context(), + types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, + check, + ) + if apierrors.IsNotFound(err) { + if err := baseClient.Create(t.Context(), cluster); err != nil { + t.Fatalf("failed to create initial cluster: %v", err) + } + + // Ensure DeletionTimestamp is set in the API if the test requires it. + // client.Create strips this field, so we must invoke Delete() to re-apply it. + if shouldDelete { + // 1. 
Refresh object to avoid ResourceVersion conflict + if err := baseClient.Get(t.Context(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cluster); err != nil { + t.Fatalf("failed to refresh cluster before delete: %v", err) + } + // 2. Delete it + if err := baseClient.Delete(t.Context(), cluster); err != nil { + t.Fatalf("failed to set deletion timestamp: %v", err) + } + // 3. Refresh again to ensure the controller sees the deletion timestamp + if err := baseClient.Get(t.Context(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cluster); err != nil { + t.Fatalf("failed to refresh cluster after deletion: %v", err) + } + } + } + } + + reconciler := &MultigresClusterReconciler{ + Client: finalClient, + Scheme: scheme, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: cluster.Name, + Namespace: cluster.Namespace, + }, + } + + _, err := reconciler.Reconcile(t.Context(), req) + if err == nil { + t.Error("Expected error from Reconcile, got nil") + } + + if tc.validate != nil { + tc.validate(t, baseClient) + } + }) + } +} + +func TestSetupWithManager_Coverage(t *testing.T) { + t.Parallel() + + // Test the default path (no options) + t.Run("No Options", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Recovered expected panic: %v", r) + } + }() + reconciler := &MultigresClusterReconciler{} + _ = reconciler.SetupWithManager(nil) + }) + + // Test the path with options to ensure coverage of the 'if len(opts) > 0' block + t.Run("With Options", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Recovered expected panic: %v", r) + } + }() + reconciler := &MultigresClusterReconciler{} + _ = reconciler.SetupWithManager(nil, controller.Options{MaxConcurrentReconciles: 1}) + }) +} + +func TestTemplateLogic_Unit(t *testing.T) { + t.Parallel() + + t.Run("MergeCellConfig", func(t *testing.T) { + t.Parallel() + + tpl := &multigresv1alpha1.CellTemplate{ + Spec: multigresv1alpha1.CellTemplateSpec{ + MultiGateway: &multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(1)), + PodAnnotations: map[string]string{"foo": "bar"}, + PodLabels: map[string]string{"l1": "v1"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: parseQty("100m")}, + }, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{}, + }, + }, + LocalTopoServer: &multigresv1alpha1.LocalTopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "base"}, + }, + }, + } + overrides := &multigresv1alpha1.CellOverrides{ + MultiGateway: &multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + PodAnnotations: map[string]string{"baz": "qux"}, + PodLabels: map[string]string{"l2": "v2"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceMemory: parseQty("1Gi")}, + }, + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{}, + }, + }, + } + + gw, topo := MergeCellConfig(tpl, overrides, nil) + + wantGw := multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + PodAnnotations: map[string]string{"foo": "bar", "baz": "qux"}, + PodLabels: map[string]string{"l1": "v1", "l2": "v2"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceMemory: parseQty("1Gi")}, + }, + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{}, + }, + } + + // Use IgnoreUnexported to handle resource.Quantity fields + if diff := cmp.Diff(wantGw, gw, 
cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("MergeCellConfig gateway mismatch (-want +got):\n%s", diff) + } + + wantTopo := &multigresv1alpha1.LocalTopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "base"}, + } + if diff := cmp.Diff(wantTopo, topo); diff != "" { + t.Errorf("MergeCellConfig topo mismatch (-want +got):\n%s", diff) + } + + inline := &multigresv1alpha1.CellInlineSpec{ + MultiGateway: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(99))}, + } + gw, _ = MergeCellConfig(tpl, overrides, inline) + if got, want := *gw.Replicas, int32(99); got != want { + t.Errorf("MergeCellConfig inline priority mismatch got %d, want %d", got, want) + } + + gw, _ = MergeCellConfig(nil, overrides, nil) + if got, want := *gw.Replicas, int32(2); got != want { + t.Errorf("MergeCellConfig nil template mismatch got %d, want %d", got, want) + } + + tplNil := &multigresv1alpha1.CellTemplate{ + Spec: multigresv1alpha1.CellTemplateSpec{ + MultiGateway: &multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + }, + } + gw, _ = MergeCellConfig(tplNil, overrides, nil) + if got, want := gw.PodAnnotations["baz"], "qux"; got != want { + t.Errorf("MergeCellConfig nil map init mismatch got %q, want %q", got, want) + } + }) + + t.Run("MergeShardConfig", func(t *testing.T) { + t.Parallel() + + tpl := &multigresv1alpha1.ShardTemplate{ + Spec: multigresv1alpha1.ShardTemplateSpec{ + MultiOrch: &multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(1)), + }, + Cells: []multigresv1alpha1.CellName{"a"}, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "p1": { + Type: "readOnly", + ReplicasPerCell: ptr.To(int32(1)), + Storage: multigresv1alpha1.StorageSpec{Size: "1Gi"}, + Postgres: multigresv1alpha1.ContainerConfig{ + Resources: corev1.ResourceRequirements{}, + }, + }, + }, + }, + } + + overrides := &multigresv1alpha1.ShardOverrides{ + MultiOrch: &multigresv1alpha1.MultiOrchSpec{ + Cells: []multigresv1alpha1.CellName{"b"}, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "p1": { + Type: "readWrite", // Added Type override here to hit coverage + ReplicasPerCell: ptr.To(int32(2)), + Storage: multigresv1alpha1.StorageSpec{Size: "10Gi"}, + Postgres: multigresv1alpha1.ContainerConfig{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: parseQty("1")}, + }, + }, + Multipooler: multigresv1alpha1.ContainerConfig{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: parseQty("1")}, + }, + }, + Affinity: &corev1.Affinity{PodAntiAffinity: &corev1.PodAntiAffinity{}}, + Cells: []multigresv1alpha1.CellName{"c2"}, + }, + "p2": {Type: "write"}, + }, + } + + orch, pools := MergeShardConfig(tpl, overrides, nil) + + wantOrchCells := []multigresv1alpha1.CellName{"b"} + if diff := cmp.Diff(wantOrchCells, orch.Cells); diff != "" { + t.Errorf("MergeShardConfig MultiOrch cells mismatch (-want +got):\n%s", diff) + } + + p1 := pools["p1"] + wantP1 := multigresv1alpha1.PoolSpec{ + Type: "readWrite", + ReplicasPerCell: ptr.To(int32(2)), + Storage: multigresv1alpha1.StorageSpec{Size: "10Gi"}, + Postgres: multigresv1alpha1.ContainerConfig{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: parseQty("1")}, + }, + }, + Multipooler: multigresv1alpha1.ContainerConfig{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: parseQty("1")}, + }, + }, + Affinity: 
&corev1.Affinity{PodAntiAffinity: &corev1.PodAntiAffinity{}}, + Cells: []multigresv1alpha1.CellName{"c2"}, + } + + if diff := cmp.Diff(wantP1, p1, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("MergeShardConfig Pool p1 mismatch (-want +got):\n%s", diff) + } + + inline := &multigresv1alpha1.ShardInlineSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []multigresv1alpha1.CellName{"inline"}, + }, + } + orch, _ = MergeShardConfig(tpl, overrides, inline) + wantInlineCells := []multigresv1alpha1.CellName{"inline"} + if diff := cmp.Diff(wantInlineCells, orch.Cells); diff != "" { + t.Errorf("MergeShardConfig inline priority mismatch (-want +got):\n%s", diff) + } + }) + + t.Run("ResolveGlobalTopo", func(t *testing.T) { + t.Parallel() + + spec := &multigresv1alpha1.GlobalTopoServerSpec{TemplateRef: "t1"} + core := &multigresv1alpha1.CoreTemplate{ + Spec: multigresv1alpha1.CoreTemplateSpec{ + GlobalTopoServer: &multigresv1alpha1.TopoServerSpec{ + Etcd: &multigresv1alpha1.EtcdSpec{Image: "resolved"}, + }, + }, + } + res := ResolveGlobalTopo(spec, core) + if got, want := res.Etcd.Image, "resolved"; got != want { + t.Errorf("ResolveGlobalTopo template mismatch got %q, want %q", got, want) + } + + spec2 := &multigresv1alpha1.GlobalTopoServerSpec{ + TemplateRef: "t1", + Etcd: &multigresv1alpha1.EtcdSpec{Image: "inline"}, + } + res2 := ResolveGlobalTopo(spec2, nil) + if got, want := res2.Etcd.Image, "inline"; got != want { + t.Errorf("ResolveGlobalTopo inline fallback mismatch got %q, want %q", got, want) + } + + spec4 := &multigresv1alpha1.GlobalTopoServerSpec{} + res4 := ResolveGlobalTopo(spec4, nil) + if diff := cmp.Diff(spec4, res4); diff != "" { + t.Errorf("ResolveGlobalTopo no-op mismatch (-want +got):\n%s", diff) + } + }) + + t.Run("ResolveMultiAdmin", func(t *testing.T) { + t.Parallel() + + spec := &multigresv1alpha1.MultiAdminConfig{TemplateRef: "t1"} + core := &multigresv1alpha1.CoreTemplate{ + Spec: multigresv1alpha1.CoreTemplateSpec{ + MultiAdmin: &multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(10))}, + }, + } + res := ResolveMultiAdmin(spec, core) + if got, want := *res.Replicas, int32(10); got != want { + t.Errorf("ResolveMultiAdmin template mismatch got %d, want %d", got, want) + } + + spec2 := &multigresv1alpha1.MultiAdminConfig{ + TemplateRef: "t1", + Spec: &multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(5))}, + } + res2 := ResolveMultiAdmin(spec2, nil) + if got, want := *res2.Replicas, int32(5); got != want { + t.Errorf("ResolveMultiAdmin inline fallback mismatch got %d, want %d", got, want) + } + + res3 := ResolveMultiAdmin(&multigresv1alpha1.MultiAdminConfig{}, nil) + if res3 != nil { + t.Error("ResolveMultiAdmin expected nil for empty config") + } + + spec5 := &multigresv1alpha1.MultiAdminConfig{} + res5 := ResolveMultiAdmin(spec5, nil) + if res5 != nil { + t.Error("ResolveMultiAdmin expected nil when no config and no template") + } + }) +} + +func parseQty(s string) resource.Quantity { + return resource.MustParse(s) +} diff --git a/pkg/cluster-handler/controller/multigrescluster/template_logic.go b/pkg/cluster-handler/controller/multigrescluster/template_logic.go new file mode 100644 index 00000000..c585ca92 --- /dev/null +++ b/pkg/cluster-handler/controller/multigrescluster/template_logic.go @@ -0,0 +1,302 @@ +package multigrescluster + +import ( + "context" + "fmt" + "reflect" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + 
"k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NOTE: We may want to consider move this to different module/package before implementing the Mutating Webhook. +// This separation is critical to prevent circular dependencies between the Webhook and Controller packages +// and ensures that the "Level 4" defaulting logic is reusable as a Single Source of Truth for both the reconciliation loop +// and admission requests. + +// TemplateResolver handles the logic for fetching and merging templates. +type TemplateResolver struct { + // Client is the kubernetes client used to fetch templates. + Client client.Client + // Namespace is the namespace where templates are expected to exist. + Namespace string + // Defaults contains the cluster-level template references to use when explicit ones are missing. + Defaults multigresv1alpha1.TemplateDefaults +} + +// ResolveCoreTemplate determines the target CoreTemplate name and fetches it. +// +// If templateName is empty, it uses the following precedence: +// 1. The cluster-level default defined in TemplateDefaults. +// 2. A CoreTemplate named "default" found in the same namespace where MultigresCluster is deployed. +// +// If an explicit template (param or cluster default) is not found, it returns an error. +// If the implicit "default" template is not found, it returns an empty object (safe fallback). +// In this case the default would be applied by the operator via mutating webhook. +func (r *TemplateResolver) ResolveCoreTemplate( + ctx context.Context, + templateName string, +) (*multigresv1alpha1.CoreTemplate, error) { + name := templateName + isImplicitFallback := false + + if name == "" { + name = r.Defaults.CoreTemplate + } + if name == "" { + name = FallbackCoreTemplate + isImplicitFallback = true + } + + tpl := &multigresv1alpha1.CoreTemplate{} + err := r.Client.Get(ctx, types.NamespacedName{Name: name, Namespace: r.Namespace}, tpl) + if err != nil { + if errors.IsNotFound(err) { + if isImplicitFallback { + return &multigresv1alpha1.CoreTemplate{}, nil + } + return nil, fmt.Errorf("referenced CoreTemplate '%s' not found", name) + } + return nil, fmt.Errorf("failed to get CoreTemplate: %w", err) + } + return tpl, nil +} + +// ResolveCellTemplate fetches and resolves a CellTemplate by name, handling defaults. +func (r *TemplateResolver) ResolveCellTemplate( + ctx context.Context, + templateName string, +) (*multigresv1alpha1.CellTemplate, error) { + name := templateName + isImplicitFallback := false + + if name == "" { + name = r.Defaults.CellTemplate + } + if name == "" { + name = FallbackCellTemplate + isImplicitFallback = true + } + + tpl := &multigresv1alpha1.CellTemplate{} + err := r.Client.Get(ctx, types.NamespacedName{Name: name, Namespace: r.Namespace}, tpl) + if err != nil { + if errors.IsNotFound(err) { + if isImplicitFallback { + return &multigresv1alpha1.CellTemplate{}, nil + } + return nil, fmt.Errorf("referenced CellTemplate '%s' not found", name) + } + return nil, fmt.Errorf("failed to get CellTemplate: %w", err) + } + return tpl, nil +} + +// ResolveShardTemplate fetches and resolves a ShardTemplate by name, handling defaults. 
+func (r *TemplateResolver) ResolveShardTemplate( + ctx context.Context, + templateName string, +) (*multigresv1alpha1.ShardTemplate, error) { + name := templateName + isImplicitFallback := false + + if name == "" { + name = r.Defaults.ShardTemplate + } + if name == "" { + name = FallbackShardTemplate + isImplicitFallback = true + } + + tpl := &multigresv1alpha1.ShardTemplate{} + err := r.Client.Get(ctx, types.NamespacedName{Name: name, Namespace: r.Namespace}, tpl) + if err != nil { + if errors.IsNotFound(err) { + if isImplicitFallback { + return &multigresv1alpha1.ShardTemplate{}, nil + } + return nil, fmt.Errorf("referenced ShardTemplate '%s' not found", name) + } + return nil, fmt.Errorf("failed to get ShardTemplate: %w", err) + } + return tpl, nil +} + +// MergeCellConfig merges a template spec with overrides and an inline spec to produce the final configuration. +func MergeCellConfig( + template *multigresv1alpha1.CellTemplate, + overrides *multigresv1alpha1.CellOverrides, + inline *multigresv1alpha1.CellInlineSpec, +) (multigresv1alpha1.StatelessSpec, *multigresv1alpha1.LocalTopoServerSpec) { + var gateway multigresv1alpha1.StatelessSpec + var localTopo *multigresv1alpha1.LocalTopoServerSpec + + if template != nil { + if template.Spec.MultiGateway != nil { + gateway = *template.Spec.MultiGateway.DeepCopy() + } + if template.Spec.LocalTopoServer != nil { + localTopo = template.Spec.LocalTopoServer.DeepCopy() + } + } + + if overrides != nil { + if overrides.MultiGateway != nil { + mergeStatelessSpec(&gateway, overrides.MultiGateway) + } + } + + if inline != nil { + return inline.MultiGateway, inline.LocalTopoServer + } + + return gateway, localTopo +} + +// MergeShardConfig merges a template spec with overrides and an inline spec to produce the final configuration. 
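+//
+// Precedence is inline > overrides > template: a non-nil inline spec replaces the
+// template output entirely, while overrides are merged field-by-field. A small
+// illustrative sketch, assuming tpl defines a "primary" pool:
+//
+//	orch, pools := MergeShardConfig(tpl, &multigresv1alpha1.ShardOverrides{
+//		Pools: map[string]multigresv1alpha1.PoolSpec{
+//			"primary": {ReplicasPerCell: ptr.To(int32(3))},
+//		},
+//	}, nil)
+//	// pools["primary"] keeps the template's Type and Storage but runs 3 replicas per cell.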
+func MergeShardConfig( + template *multigresv1alpha1.ShardTemplate, + overrides *multigresv1alpha1.ShardOverrides, + inline *multigresv1alpha1.ShardInlineSpec, +) (multigresv1alpha1.MultiOrchSpec, map[string]multigresv1alpha1.PoolSpec) { + if inline != nil { + return inline.MultiOrch, inline.Pools + } + + var multiOrch multigresv1alpha1.MultiOrchSpec + pools := make(map[string]multigresv1alpha1.PoolSpec) + + if template != nil { + if template.Spec.MultiOrch != nil { + multiOrch = *template.Spec.MultiOrch.DeepCopy() + } + for k, v := range template.Spec.Pools { + pools[k] = *v.DeepCopy() + } + } + + if overrides != nil { + if overrides.MultiOrch != nil { + mergeMultiOrchSpec(&multiOrch, overrides.MultiOrch) + } + + for k, v := range overrides.Pools { + if existingPool, exists := pools[k]; exists { + mergedPool := mergePoolSpec(existingPool, v) + pools[k] = mergedPool + } else { + pools[k] = v + } + } + } + + return multiOrch, pools +} + +func mergeStatelessSpec( + base *multigresv1alpha1.StatelessSpec, + override *multigresv1alpha1.StatelessSpec, +) { + if override.Replicas != nil { + base.Replicas = override.Replicas + } + if !reflect.DeepEqual(override.Resources, corev1.ResourceRequirements{}) { + base.Resources = override.Resources + } + if override.Affinity != nil { + base.Affinity = override.Affinity + } + + for k, v := range override.PodAnnotations { + if base.PodAnnotations == nil { + base.PodAnnotations = make(map[string]string) + } + base.PodAnnotations[k] = v + } + for k, v := range override.PodLabels { + if base.PodLabels == nil { + base.PodLabels = make(map[string]string) + } + base.PodLabels[k] = v + } +} + +func mergeMultiOrchSpec( + base *multigresv1alpha1.MultiOrchSpec, + override *multigresv1alpha1.MultiOrchSpec, +) { + mergeStatelessSpec(&base.StatelessSpec, &override.StatelessSpec) + if len(override.Cells) > 0 { + base.Cells = override.Cells + } +} + +func mergePoolSpec( + base multigresv1alpha1.PoolSpec, + override multigresv1alpha1.PoolSpec, +) multigresv1alpha1.PoolSpec { + out := base + if override.Type != "" { + out.Type = override.Type + } + if len(override.Cells) > 0 { + out.Cells = override.Cells + } + if override.ReplicasPerCell != nil { + out.ReplicasPerCell = override.ReplicasPerCell + } + if override.Storage.Size != "" { + out.Storage = override.Storage + } + if !reflect.DeepEqual(override.Postgres.Resources, corev1.ResourceRequirements{}) { + out.Postgres.Resources = override.Postgres.Resources + } + if !reflect.DeepEqual(override.Multipooler.Resources, corev1.ResourceRequirements{}) { + out.Multipooler.Resources = override.Multipooler.Resources + } + if override.Affinity != nil { + out.Affinity = override.Affinity + } + return out +} + +// ResolveGlobalTopo determines the final GlobalTopoServer configuration by preferring inline config over templates. +func ResolveGlobalTopo( + spec *multigresv1alpha1.GlobalTopoServerSpec, + coreTemplate *multigresv1alpha1.CoreTemplate, +) *multigresv1alpha1.GlobalTopoServerSpec { + // If inline config is present, use it. + if spec.Etcd != nil || spec.External != nil { + return spec + } + + // Otherwise, use the template (loaded by caller based on TemplateRef or Defaults) + if coreTemplate != nil && coreTemplate.Spec.GlobalTopoServer != nil { + return &multigresv1alpha1.GlobalTopoServerSpec{ + Etcd: coreTemplate.Spec.GlobalTopoServer.Etcd, + } + } + + return spec +} + +// ResolveMultiAdmin determines the final MultiAdmin configuration by preferring inline config over templates. 
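+//
+// Illustrative call (the field path is hypothetical); a nil result means that
+// neither the inline spec nor the template provided MultiAdmin settings:
+//
+//	adminSpec := ResolveMultiAdmin(&cluster.Spec.MultiAdmin, coreTemplate)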
+func ResolveMultiAdmin( + spec *multigresv1alpha1.MultiAdminConfig, + coreTemplate *multigresv1alpha1.CoreTemplate, +) *multigresv1alpha1.StatelessSpec { + // If inline spec is present, use it. + if spec.Spec != nil { + return spec.Spec + } + + // Otherwise, use the template (loaded by caller based on TemplateRef or Defaults) + if coreTemplate != nil && coreTemplate.Spec.MultiAdmin != nil { + return coreTemplate.Spec.MultiAdmin + } + + return nil +} diff --git a/pkg/cluster-handler/controller/tablegroup/integration_test.go b/pkg/cluster-handler/controller/tablegroup/integration_test.go new file mode 100644 index 00000000..e1db8142 --- /dev/null +++ b/pkg/cluster-handler/controller/tablegroup/integration_test.go @@ -0,0 +1,262 @@ +//go:build integration +// +build integration + +package tablegroup_test + +import ( + "path/filepath" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/cluster-handler/controller/tablegroup" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func TestSetupWithManager(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + if err := (&tablegroup.TableGroupReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr, controller.Options{ + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } +} + +func TestTableGroupReconciliation(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + tableGroup *multigresv1alpha1.TableGroup + wantResources []client.Object + }{ + "simple tablegroup creates shards": { + tableGroup: &multigresv1alpha1.TableGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tg-test-simple", + Namespace: "default", + Labels: map[string]string{ + "multigres.com/cluster": "test-cluster", + "multigres.com/database": "db1", + "multigres.com/tablegroup": "tg1", + }, + }, + Spec: multigresv1alpha1.TableGroupSpec{ + DatabaseName: "db1", + TableGroupName: "tg1", + Images: multigresv1alpha1.ShardImages{ + MultiOrch: "orch:latest", + MultiPooler: "pooler:latest", + Postgres: "postgres:15", + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRef{ + Address: "etcd:2379", + RootPath: "/multigres/global", + Implementation: "etcd2", + }, + Shards: []multigresv1alpha1.ShardResolvedSpec{ + { + Name: "s1", + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + Cells: []multigresv1alpha1.CellName{"zone-a"}, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "primary": { + Type: "readWrite", + ReplicasPerCell: ptr.To(int32(1)), + Cells: []multigresv1alpha1.CellName{"zone-a"}, + }, + }, + }, + { + Name: "s2", + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + Cells: 
[]multigresv1alpha1.CellName{"zone-b"}, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "primary": { + Type: "readWrite", + ReplicasPerCell: ptr.To(int32(1)), + Cells: []multigresv1alpha1.CellName{"zone-b"}, + }, + }, + }, + }, + }, + }, + wantResources: []client.Object{ + // Shard 1 + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tg-test-simple-s1", + Namespace: "default", + Labels: map[string]string{ + "multigres.com/cluster": "test-cluster", + "multigres.com/database": "db1", + "multigres.com/tablegroup": "tg1", + "multigres.com/shard": "s1", + }, + OwnerReferences: tgOwnerRefs(t, "tg-test-simple"), + }, + Spec: multigresv1alpha1.ShardSpec{ + ShardName: "s1", + DatabaseName: "db1", + TableGroupName: "tg1", + Images: multigresv1alpha1.ShardImages{ + MultiOrch: "orch:latest", + MultiPooler: "pooler:latest", + Postgres: "postgres:15", + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRef{ + Address: "etcd:2379", + RootPath: "/multigres/global", + Implementation: "etcd2", + }, + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + Cells: []multigresv1alpha1.CellName{"zone-a"}, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "primary": { + Type: "readWrite", + ReplicasPerCell: ptr.To(int32(1)), + Cells: []multigresv1alpha1.CellName{"zone-a"}, + }, + }, + }, + }, + // Shard 2 + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tg-test-simple-s2", + Namespace: "default", + Labels: map[string]string{ + "multigres.com/cluster": "test-cluster", + "multigres.com/database": "db1", + "multigres.com/tablegroup": "tg1", + "multigres.com/shard": "s2", + }, + OwnerReferences: tgOwnerRefs(t, "tg-test-simple"), + }, + Spec: multigresv1alpha1.ShardSpec{ + ShardName: "s2", + DatabaseName: "db1", + TableGroupName: "tg1", + Images: multigresv1alpha1.ShardImages{ + MultiOrch: "orch:latest", + MultiPooler: "pooler:latest", + Postgres: "postgres:15", + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRef{ + Address: "etcd:2379", + RootPath: "/multigres/global", + Implementation: "etcd2", + }, + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{Replicas: ptr.To(int32(1))}, + Cells: []multigresv1alpha1.CellName{"zone-b"}, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "primary": { + Type: "readWrite", + ReplicasPerCell: ptr.To(int32(1)), + Cells: []multigresv1alpha1.CellName{"zone-b"}, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + ctx := t.Context() + + // 1. Setup Envtest and Manager + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + // 2. Setup Watcher + watcher := testutil.NewResourceWatcher(t, ctx, mgr, + testutil.WithCmpOpts( + testutil.IgnoreMetaRuntimeFields(), + ), + testutil.WithExtraResource( + &multigresv1alpha1.TableGroup{}, + &multigresv1alpha1.Shard{}, + ), + testutil.WithTimeout(10*time.Second), + ) + k8sClient := mgr.GetClient() + + // 3. Setup and Start Controller + reconciler := &tablegroup.TableGroupReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + } + + if err := reconciler.SetupWithManager(mgr, controller.Options{ + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } + + // 4. 
Create the Input + if err := k8sClient.Create(ctx, tc.tableGroup); err != nil { + t.Fatalf("Failed to create the initial tablegroup, %v", err) + } + + // 5. Assert Logic + if err := watcher.WaitForMatch(tc.wantResources...); err != nil { + t.Errorf("Resources mismatch:\n%v", err) + } + }) + } +} + +// Helpers + +func tgOwnerRefs(t testing.TB, tgName string) []metav1.OwnerReference { + t.Helper() + return []metav1.OwnerReference{{ + APIVersion: "multigres.com/v1alpha1", + Kind: "TableGroup", + Name: tgName, + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }} +} diff --git a/pkg/cluster-handler/controller/tablegroup/tablegroup_controller.go b/pkg/cluster-handler/controller/tablegroup/tablegroup_controller.go new file mode 100644 index 00000000..7cd1627b --- /dev/null +++ b/pkg/cluster-handler/controller/tablegroup/tablegroup_controller.go @@ -0,0 +1,166 @@ +package tablegroup + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +// TableGroupReconciler reconciles a TableGroup object. +type TableGroupReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// Reconcile reads the state of the TableGroup and ensures its child Shards are in the desired state. +// +// +kubebuilder:rbac:groups=multigres.com,resources=tablegroups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=multigres.com,resources=tablegroups/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=multigres.com,resources=shards,verbs=get;list;watch;create;update;patch;delete +func (r *TableGroupReconciler) Reconcile( + ctx context.Context, + req ctrl.Request, +) (ctrl.Result, error) { + l := log.FromContext(ctx) + + tg := &multigresv1alpha1.TableGroup{} + err := r.Get(ctx, req.NamespacedName, tg) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, fmt.Errorf("failed to get TableGroup: %w", err) + } + + activeShardNames := make(map[string]bool, len(tg.Spec.Shards)) + + for _, shardSpec := range tg.Spec.Shards { + shardNameFull := fmt.Sprintf("%s-%s", tg.Name, shardSpec.Name) + activeShardNames[shardNameFull] = true + + shardCR := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: shardNameFull, + Namespace: tg.Namespace, + Labels: map[string]string{ + "multigres.com/cluster": tg.Labels["multigres.com/cluster"], + "multigres.com/database": tg.Spec.DatabaseName, + "multigres.com/tablegroup": tg.Spec.TableGroupName, + "multigres.com/shard": shardSpec.Name, + }, + }, + } + + if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, shardCR, func() error { + shardCR.Spec.DatabaseName = tg.Spec.DatabaseName + shardCR.Spec.TableGroupName = tg.Spec.TableGroupName + shardCR.Spec.ShardName = shardSpec.Name + shardCR.Spec.Images = tg.Spec.Images + shardCR.Spec.GlobalTopoServer = tg.Spec.GlobalTopoServer + shardCR.Spec.MultiOrch = shardSpec.MultiOrch + shardCR.Spec.Pools = shardSpec.Pools + + return controllerutil.SetControllerReference(tg, shardCR, r.Scheme) + }); err != nil { + l.Error(err, "Failed to create/update shard", "shard", shardNameFull) + 
return ctrl.Result{}, fmt.Errorf("failed to create/update shard: %w", err) + } + } + + // Prune orphan Shards + existingShards := &multigresv1alpha1.ShardList{} + if err := r.List(ctx, existingShards, client.InNamespace(tg.Namespace), client.MatchingLabels{ + "multigres.com/cluster": tg.Labels["multigres.com/cluster"], + "multigres.com/database": tg.Spec.DatabaseName, + "multigres.com/tablegroup": tg.Spec.TableGroupName, + }); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list shards for pruning: %w", err) + } + + for _, s := range existingShards.Items { + if !activeShardNames[s.Name] { + if err := r.Delete(ctx, &s); err != nil { + return ctrl.Result{}, fmt.Errorf( + "failed to delete orphan shard '%s': %w", + s.Name, + err, + ) + } + } + } + + // Update Status + total := int32(len(tg.Spec.Shards)) + ready := int32(0) + + // Re-list to check status + if err := r.List(ctx, existingShards, client.InNamespace(tg.Namespace), client.MatchingLabels{ + "multigres.com/cluster": tg.Labels["multigres.com/cluster"], + "multigres.com/database": tg.Spec.DatabaseName, + "multigres.com/tablegroup": tg.Spec.TableGroupName, + }); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list shards for status: %w", err) + } + + for _, s := range existingShards.Items { + for _, cond := range s.Status.Conditions { + if cond.Type == "Available" && cond.Status == "True" { + ready++ + break + } + } + } + + tg.Status.TotalShards = total + tg.Status.ReadyShards = ready + + condStatus := metav1.ConditionFalse + if ready == total && total > 0 { + condStatus = metav1.ConditionTrue + } else if total == 0 { + condStatus = metav1.ConditionTrue + } + + meta.SetStatusCondition(&tg.Status.Conditions, metav1.Condition{ + Type: "Available", + Status: condStatus, + Reason: "ShardsReady", + Message: fmt.Sprintf("%d/%d shards ready", ready, total), + LastTransitionTime: metav1.Now(), + }) + + if err := r.Status().Update(ctx, tg); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update status: %w", err) + } + + return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *TableGroupReconciler) SetupWithManager( + mgr ctrl.Manager, + opts ...controller.Options, +) error { + controllerOpts := controller.Options{} + if len(opts) > 0 { + controllerOpts = opts[0] + } + + return ctrl.NewControllerManagedBy(mgr). + For(&multigresv1alpha1.TableGroup{}). + Owns(&multigresv1alpha1.Shard{}). + WithOptions(controllerOpts). 
+ Complete(r) +} diff --git a/pkg/cluster-handler/controller/tablegroup/tablegroup_controller_test.go b/pkg/cluster-handler/controller/tablegroup/tablegroup_controller_test.go new file mode 100644 index 00000000..62cafe44 --- /dev/null +++ b/pkg/cluster-handler/controller/tablegroup/tablegroup_controller_test.go @@ -0,0 +1,432 @@ +package tablegroup + +import ( + "errors" + "fmt" + "testing" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func setupFixtures( + t testing.TB, +) (*multigresv1alpha1.TableGroup, string, string, string, string, string) { + t.Helper() + + tgName := "test-tg" + namespace := "default" + clusterName := "test-cluster" + dbName := "db1" + tgLabelName := "tg1" + + baseTG := &multigresv1alpha1.TableGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: tgName, + Namespace: namespace, + Labels: map[string]string{ + "multigres.com/cluster": clusterName, + "multigres.com/database": dbName, + "multigres.com/tablegroup": tgLabelName, + }, + }, + Spec: multigresv1alpha1.TableGroupSpec{ + DatabaseName: dbName, + TableGroupName: tgLabelName, + Images: multigresv1alpha1.ShardImages{ + MultiOrch: "orch:v1", + MultiPooler: "pooler:v1", + Postgres: "pg:15", + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRef{ + Address: "http://etcd:2379", + }, + Shards: []multigresv1alpha1.ShardResolvedSpec{ + { + Name: "shard-0", + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(1)), + }, + }, + Pools: map[string]multigresv1alpha1.PoolSpec{ + "data": {ReplicasPerCell: ptr.To(int32(2))}, + }, + }, + }, + }, + } + return baseTG, tgName, namespace, clusterName, dbName, tgLabelName +} + +func TestTableGroupReconciler_Reconcile_Success(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + baseTG, tgName, namespace, clusterName, dbName, tgLabelName := setupFixtures(t) + + tests := map[string]struct { + tableGroup *multigresv1alpha1.TableGroup + existingObjects []client.Object + preReconcileUpdate func(testing.TB, *multigresv1alpha1.TableGroup) + skipCreate bool // If true, the object won't be created in the fake client (simulates Not Found) + validate func(testing.TB, client.Client) + }{ + "Create: Shard Creation": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{}, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + shardNameFull := fmt.Sprintf("%s-%s", tgName, "shard-0") + shard := &multigresv1alpha1.Shard{} + if err := c.Get(ctx, types.NamespacedName{Name: shardNameFull, Namespace: namespace}, shard); err != nil { + t.Fatalf("Shard %s not created: %v", shardNameFull, err) + } + if got, want := shard.Spec.DatabaseName, dbName; got != want { + t.Errorf("Shard DB name mismatch got %q, want %q", got, want) + } + }, + }, + "Update: Apply Changes and Prune Orphans": { + tableGroup: baseTG.DeepCopy(), + preReconcileUpdate: func(t testing.TB, tg *multigresv1alpha1.TableGroup) { + tg.Spec.Shards = []multigresv1alpha1.ShardResolvedSpec{ + { + 
Name: "shard-1", // New shard + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + StatelessSpec: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(1)), + }, + }, + }, + } + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", tgName, "shard-0"), + Namespace: namespace, + Labels: map[string]string{ + "multigres.com/cluster": clusterName, + "multigres.com/database": dbName, + "multigres.com/tablegroup": tgLabelName, + }, + }, + Spec: multigresv1alpha1.ShardSpec{ShardName: "shard-0"}, + }, + }, + validate: func(t testing.TB, c client.Client) { + ctx := t.Context() + newShard := &multigresv1alpha1.Shard{} + if err := c.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-%s", tgName, "shard-1"), Namespace: namespace}, newShard); err != nil { + t.Error("New shard-1 not created") + } + oldShard := &multigresv1alpha1.Shard{} + if err := c.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-%s", tgName, "shard-0"), Namespace: namespace}, oldShard); !apierrors.IsNotFound( + err, + ) { + t.Error("Old shard-0 was not pruned") + } + }, + }, + "Status: Update Ready Count": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{ + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", tgName, "shard-0"), + Namespace: namespace, + Labels: map[string]string{ + "multigres.com/cluster": clusterName, + "multigres.com/database": dbName, + "multigres.com/tablegroup": tgLabelName, + }, + }, + Spec: multigresv1alpha1.ShardSpec{ShardName: "shard-0"}, + Status: multigresv1alpha1.ShardStatus{ + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }, + }, + }, + }, + }, + validate: func(t testing.TB, c client.Client) { + updatedTG := &multigresv1alpha1.TableGroup{} + if err := c.Get(t.Context(), types.NamespacedName{Name: tgName, Namespace: namespace}, updatedTG); err != nil { + t.Fatalf("failed to get tablegroup: %v", err) + } + if got, want := updatedTG.Status.ReadyShards, int32(1); got != want { + t.Errorf("ReadyShards mismatch got %d, want %d", got, want) + } + }, + }, + "Status: Partial Ready (Not all shards ready)": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{ + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", tgName, "shard-0"), + Namespace: namespace, + Labels: map[string]string{ + "multigres.com/cluster": clusterName, + "multigres.com/database": dbName, + "multigres.com/tablegroup": tgLabelName, + }, + }, + Spec: multigresv1alpha1.ShardSpec{ShardName: "shard-0"}, + // No status, so not ready + }, + }, + validate: func(t testing.TB, c client.Client) { + updatedTG := &multigresv1alpha1.TableGroup{} + if err := c.Get(t.Context(), types.NamespacedName{Name: tgName, Namespace: namespace}, updatedTG); err != nil { + t.Fatalf("failed to get tablegroup: %v", err) + } + if got, want := updatedTG.Status.ReadyShards, int32(0); got != want { + t.Errorf("ReadyShards mismatch got %d, want %d", got, want) + } + if meta.IsStatusConditionTrue(updatedTG.Status.Conditions, "Available") { + t.Error("TableGroup should NOT be Available") + } + }, + }, + "Status: Zero Shards (Vacuously True)": { + tableGroup: baseTG.DeepCopy(), + preReconcileUpdate: func(t testing.TB, tg *multigresv1alpha1.TableGroup) { + tg.Spec.Shards = []multigresv1alpha1.ShardResolvedSpec{} + }, + existingObjects: []client.Object{}, + validate: func(t testing.TB, c client.Client) { + updatedTG := 
&multigresv1alpha1.TableGroup{} + if err := c.Get(t.Context(), types.NamespacedName{Name: tgName, Namespace: namespace}, updatedTG); err != nil { + t.Fatalf("failed to get tablegroup: %v", err) + } + if !meta.IsStatusConditionTrue(updatedTG.Status.Conditions, "Available") { + t.Error("Zero shard TableGroup should be Available") + } + }, + }, + "Error: Object Not Found (Clean Exit)": { + tableGroup: baseTG.DeepCopy(), + skipCreate: true, + existingObjects: []client.Object{}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Apply pre-reconcile updates if defined + if tc.preReconcileUpdate != nil { + tc.preReconcileUpdate(t, tc.tableGroup) + } + + objects := tc.existingObjects + // Inject TableGroup if creation is not skipped + if !tc.skipCreate { + objects = append(objects, tc.tableGroup) + } + + clientBuilder := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + WithStatusSubresource(&multigresv1alpha1.TableGroup{}, &multigresv1alpha1.Shard{}) + baseClient := clientBuilder.Build() + + reconciler := &TableGroupReconciler{ + Client: baseClient, + Scheme: scheme, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.tableGroup.Name, + Namespace: tc.tableGroup.Namespace, + }, + } + + _, err := reconciler.Reconcile(t.Context(), req) + if err != nil { + t.Errorf("Unexpected error from Reconcile: %v", err) + } + + if tc.validate != nil { + tc.validate(t, baseClient) + } + }) + } +} + +func TestTableGroupReconciler_Reconcile_Failure(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + baseTG, tgName, namespace, clusterName, dbName, tgLabelName := setupFixtures(t) + errBoom := errors.New("boom") + + tests := map[string]struct { + tableGroup *multigresv1alpha1.TableGroup + existingObjects []client.Object + preReconcileUpdate func(testing.TB, *multigresv1alpha1.TableGroup) + failureConfig *testutil.FailureConfig + }{ + "Error: Get TableGroup Failed": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName(tgName, errBoom), + }, + }, + "Error: Create/Update Shard Failed": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: testutil.FailOnObjectName( + fmt.Sprintf("%s-%s", tgName, "shard-0"), + errBoom, + ), + }, + }, + "Error: List Shards Failed (during pruning)": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnList: func(list client.ObjectList) error { + if _, ok := list.(*multigresv1alpha1.ShardList); ok { + return errBoom + } + return nil + }, + }, + }, + "Error: List Shards Failed (during status check)": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnList: testutil.FailObjListAfterNCalls(1, errBoom), + }, + }, + "Error: Delete Orphan Shard Failed": { + tableGroup: baseTG.DeepCopy(), + preReconcileUpdate: func(t testing.TB, tg *multigresv1alpha1.TableGroup) { + tg.Spec.Shards = []multigresv1alpha1.ShardResolvedSpec{} + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", tgName, "shard-0"), + Namespace: namespace, + Labels: map[string]string{ + "multigres.com/cluster": clusterName, + "multigres.com/database": dbName, + "multigres.com/tablegroup": tgLabelName, + 
}, + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnDelete: testutil.FailOnObjectName( + fmt.Sprintf("%s-%s", tgName, "shard-0"), + errBoom, + ), + }, + }, + "Error: Update Status Failed": { + tableGroup: baseTG.DeepCopy(), + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnStatusUpdate: testutil.FailOnObjectName(tgName, errBoom), + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Apply pre-reconcile updates if defined + if tc.preReconcileUpdate != nil { + tc.preReconcileUpdate(t, tc.tableGroup) + } + + objects := tc.existingObjects + // Default behavior: create the TableGroup unless getting it is set to fail (which simulates Not Found or error) + // For failure tests, usually the object exists so the code can proceed to the failing step. + objects = append(objects, tc.tableGroup) + + clientBuilder := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + WithStatusSubresource(&multigresv1alpha1.TableGroup{}, &multigresv1alpha1.Shard{}) + baseClient := clientBuilder.Build() + + finalClient := client.Client(baseClient) + if tc.failureConfig != nil { + finalClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) + } + + reconciler := &TableGroupReconciler{ + Client: finalClient, + Scheme: scheme, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.tableGroup.Name, + Namespace: tc.tableGroup.Namespace, + }, + } + + _, err := reconciler.Reconcile(t.Context(), req) + if err == nil { + t.Error("Expected error from Reconcile, got nil") + } + }) + } +} + +func TestSetupWithManager_Coverage(t *testing.T) { + t.Parallel() + + // Test the default path (no options) + t.Run("No Options", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Recovered expected panic: %v", r) + } + }() + reconciler := &TableGroupReconciler{} + _ = reconciler.SetupWithManager(nil) + }) + + // Test the path with options to ensure coverage of the 'if len(opts) > 0' block + t.Run("With Options", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Recovered expected panic: %v", r) + } + }() + reconciler := &TableGroupReconciler{} + _ = reconciler.SetupWithManager(nil, controller.Options{MaxConcurrentReconciles: 1}) + }) +} diff --git a/pkg/cluster-handler/go.mod b/pkg/cluster-handler/go.mod index be5c4f59..82d86b55 100644 --- a/pkg/cluster-handler/go.mod +++ b/pkg/cluster-handler/go.mod @@ -1,3 +1,69 @@ module github.com/numtide/multigres-operator/pkg/cluster-handler go 1.25.0 + +require ( + github.com/google/go-cmp v0.7.0 + github.com/numtide/multigres-operator/api v0.0.0-20251222211535-b3e1d4ecd958 + github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251214105213-458b940d04bd + k8s.io/api v0.34.3 + k8s.io/apimachinery v0.34.3 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + sigs.k8s.io/controller-runtime v0.22.4 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + 
github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.25.3 // indirect + github.com/onsi/gomega v1.38.3 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.9.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/protobuf v1.36.7 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/client-go v0.34.3 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/pkg/cluster-handler/go.sum b/pkg/cluster-handler/go.sum new file mode 100644 index 00000000..a2593f6d --- /dev/null +++ b/pkg/cluster-handler/go.sum @@ -0,0 +1,206 @@ +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 
h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/numtide/multigres-operator/api v0.0.0-20251222211535-b3e1d4ecd958 h1:dqHU/SmEy5CUlWMRHwIIii4wPUZ1Cj4HDUxwqKvCOsM= +github.com/numtide/multigres-operator/api v0.0.0-20251222211535-b3e1d4ecd958/go.mod h1:A1bBmTxHr+362dGZ5G6u2S4xsP6enbgdUS/UJUOmKbc= +github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251214105213-458b940d04bd h1:gp55gShKenPt4r9K1EC3SKKeOMDDreypivBWzAD6XjQ= +github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251214105213-458b940d04bd/go.mod h1:+NQa7dSvQqxhBOE9XcE9RWXLvOvNaw0keCc29Y7pjyQ= +github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= +github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= +github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= 
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 
h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= +k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= +k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= +k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=