diff --git a/pkg/resource-handler/controller/cell/cell_controller.go b/pkg/resource-handler/controller/cell/cell_controller.go new file mode 100644 index 00000000..a9ca437e --- /dev/null +++ b/pkg/resource-handler/controller/cell/cell_controller.go @@ -0,0 +1,252 @@ +package cell + +import ( + "context" + "fmt" + "slices" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + finalizerName = "cell.multigres.com/finalizer" +) + +// CellReconciler reconciles a Cell object. +type CellReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// Reconcile handles Cell resource reconciliation. +func (r *CellReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Fetch the Cell instance + cell := &multigresv1alpha1.Cell{} + if err := r.Get(ctx, req.NamespacedName, cell); err != nil { + if errors.IsNotFound(err) { + logger.Info("Cell resource not found, ignoring") + return ctrl.Result{}, nil + } + logger.Error(err, "Failed to get Cell") + return ctrl.Result{}, err + } + + // Handle deletion + if !cell.DeletionTimestamp.IsZero() { + return r.handleDeletion(ctx, cell) + } + + // Add finalizer if not present + if !slices.Contains(cell.Finalizers, finalizerName) { + cell.Finalizers = append(cell.Finalizers, finalizerName) + if err := r.Update(ctx, cell); err != nil { + logger.Error(err, "Failed to add finalizer") + return ctrl.Result{}, err + } + } + + // Reconcile MultiGateway Deployment + if err := r.reconcileMultiGatewayDeployment(ctx, cell); err != nil { + logger.Error(err, "Failed to reconcile MultiGateway Deployment") + return ctrl.Result{}, err + } + + // Reconcile MultiGateway Service + if err := r.reconcileMultiGatewayService(ctx, cell); err != nil { + logger.Error(err, "Failed to reconcile MultiGateway Service") + return ctrl.Result{}, err + } + + // Update status + if err := r.updateStatus(ctx, cell); err != nil { + logger.Error(err, "Failed to update status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// handleDeletion handles cleanup when Cell is being deleted. +func (r *CellReconciler) handleDeletion( + ctx context.Context, + cell *multigresv1alpha1.Cell, +) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + if slices.Contains(cell.Finalizers, finalizerName) { + // Perform cleanup if needed + // Currently no special cleanup required - owner references handle resource deletion + + // Remove finalizer + cell.Finalizers = slices.DeleteFunc(cell.Finalizers, func(s string) bool { + return s == finalizerName + }) + if err := r.Update(ctx, cell); err != nil { + logger.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +// reconcileMultiGatewayDeployment creates or updates the MultiGateway Deployment. 
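+// The desired Deployment is rebuilt from the Cell spec on every reconcile: if no
+// Deployment named "<cell>-multigateway" exists yet it is created, otherwise its
+// Spec and Labels are overwritten with the desired state.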
+func (r *CellReconciler) reconcileMultiGatewayDeployment( + ctx context.Context, + cell *multigresv1alpha1.Cell, +) error { + desired, err := BuildMultiGatewayDeployment(cell, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build MultiGateway Deployment: %w", err) + } + + existing := &appsv1.Deployment{} + name := cell.Name + "-multigateway" + err = r.Get(ctx, client.ObjectKey{Namespace: cell.Namespace, Name: name}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new Deployment + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create MultiGateway Deployment: %w", err) + } + return nil + } + return fmt.Errorf("failed to get MultiGateway Deployment: %w", err) + } + + // Update existing Deployment + existing.Spec = desired.Spec + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update MultiGateway Deployment: %w", err) + } + + return nil +} + +// reconcileMultiGatewayService creates or updates the MultiGateway Service. +func (r *CellReconciler) reconcileMultiGatewayService( + ctx context.Context, + cell *multigresv1alpha1.Cell, +) error { + desired, err := BuildMultiGatewayService(cell, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build MultiGateway Service: %w", err) + } + + existing := &corev1.Service{} + name := cell.Name + "-multigateway" + err = r.Get(ctx, client.ObjectKey{Namespace: cell.Namespace, Name: name}, existing) + if err != nil { + if errors.IsNotFound(err) { + // Create new Service + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create MultiGateway Service: %w", err) + } + return nil + } + return fmt.Errorf("failed to get MultiGateway Service: %w", err) + } + + // Update existing Service + existing.Spec.Ports = desired.Spec.Ports + existing.Spec.Selector = desired.Spec.Selector + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update MultiGateway Service: %w", err) + } + + return nil +} + +// updateStatus updates the Cell status based on observed state. +func (r *CellReconciler) updateStatus(ctx context.Context, cell *multigresv1alpha1.Cell) error { + // Get the MultiGateway Deployment to check status + mgDeploy := &appsv1.Deployment{} + err := r.Get( + ctx, + client.ObjectKey{Namespace: cell.Namespace, Name: cell.Name + "-multigateway"}, + mgDeploy, + ) + if err != nil { + if errors.IsNotFound(err) { + // Deployment not created yet + return nil + } + return fmt.Errorf("failed to get MultiGateway Deployment for status: %w", err) + } + + // Update status fields + cell.Status.ObservedGeneration = cell.Generation + + // Update conditions + cell.Status.Conditions = r.buildConditions(cell, mgDeploy) + + if err := r.Status().Update(ctx, cell); err != nil { + return fmt.Errorf("failed to update status: %w", err) + } + + return nil +} + +// buildConditions creates status conditions based on observed state. 
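+// A single Ready condition is derived from the MultiGateway Deployment: it is True
+// only when the Deployment reports at least one replica and all replicas are ready.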
+func (r *CellReconciler) buildConditions( + cell *multigresv1alpha1.Cell, + mgDeploy *appsv1.Deployment, +) []metav1.Condition { + conditions := []metav1.Condition{} + + // Ready condition - MultiGateway must be ready + readyCondition := metav1.Condition{ + Type: "Ready", + ObservedGeneration: cell.Generation, + LastTransitionTime: metav1.Now(), + } + + mgReady := mgDeploy.Status.ReadyReplicas == mgDeploy.Status.Replicas && + mgDeploy.Status.Replicas > 0 + + if mgReady { + readyCondition.Status = metav1.ConditionTrue + readyCondition.Reason = "MultiGatewayReady" + readyCondition.Message = fmt.Sprintf( + "MultiGateway %d/%d ready", + mgDeploy.Status.ReadyReplicas, + mgDeploy.Status.Replicas, + ) + } else { + readyCondition.Status = metav1.ConditionFalse + readyCondition.Reason = "MultiGatewayNotReady" + readyCondition.Message = fmt.Sprintf("MultiGateway %d/%d ready", mgDeploy.Status.ReadyReplicas, mgDeploy.Status.Replicas) + } + + conditions = append(conditions, readyCondition) + return conditions +} + +// SetupWithManager sets up the controller with the Manager. +func (r *CellReconciler) SetupWithManager(mgr ctrl.Manager, opts ...controller.Options) error { + controllerOpts := controller.Options{} + if len(opts) > 0 { + controllerOpts = opts[0] + } + + return ctrl.NewControllerManagedBy(mgr). + For(&multigresv1alpha1.Cell{}). + Owns(&appsv1.Deployment{}). + Owns(&corev1.Service{}). + WithOptions(controllerOpts). + Complete(r) +} diff --git a/pkg/resource-handler/controller/cell/cell_controller_internal_test.go b/pkg/resource-handler/controller/cell/cell_controller_internal_test.go new file mode 100644 index 00000000..552029ca --- /dev/null +++ b/pkg/resource-handler/controller/cell/cell_controller_internal_test.go @@ -0,0 +1,301 @@ +package cell + +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +// TestReconcileMultiGatewayDeployment_InvalidScheme tests the error path when BuildMultiGatewayDeployment fails. +// This should never happen in production - scheme is properly set up in main.go. +// Test exists for coverage of defensive error handling. +func TestReconcileMultiGatewayDeployment_InvalidScheme(t *testing.T) { + // Empty scheme without Cell type registered + invalidScheme := runtime.NewScheme() + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &CellReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileMultiGatewayDeployment(context.Background(), cell) + if err == nil { + t.Error("reconcileMultiGatewayDeployment() should error with invalid scheme") + } +} + +// TestReconcileMultiGatewayService_InvalidScheme tests the error path when BuildMultiGatewayService fails. +func TestReconcileMultiGatewayService_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + fakeClient := fake.NewClientBuilder(). 
+ WithScheme(invalidScheme). + Build() + + reconciler := &CellReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileMultiGatewayService(context.Background(), cell) + if err == nil { + t.Error("reconcileMultiGatewayService() should error with invalid scheme") + } +} + +// TestUpdateStatus_MultiGatewayDeploymentNotFound tests the NotFound path in updateStatus. +func TestUpdateStatus_MultiGatewayDeploymentNotFound(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) // Need Deployment type registered for Get to work + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cell). + WithStatusSubresource(&multigresv1alpha1.Cell{}). + Build() + + reconciler := &CellReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Call updateStatus when MultiGateway Deployment doesn't exist yet + err := reconciler.updateStatus(context.Background(), cell) + if err != nil { + t.Errorf( + "updateStatus() should not error when MultiGateway Deployment not found, got: %v", + err, + ) + } +} + +// TestHandleDeletion_NoFinalizer tests early return when no finalizer is present. +func TestHandleDeletion_NoFinalizer(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + Finalizers: []string{}, // No finalizer + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cell). + Build() + + reconciler := &CellReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + result, err := reconciler.handleDeletion(context.Background(), cell) + if err != nil { + t.Errorf("handleDeletion() should not error when no finalizer, got: %v", err) + } + if result.RequeueAfter > 0 { + t.Error("handleDeletion() should not requeue when no finalizer") + } +} + +// TestReconcileMultiGatewayDeployment_GetError tests error path on Get MultiGateway Deployment (not NotFound). +func TestReconcileMultiGatewayDeployment_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + // Create client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cell). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-cell-multigateway", testutil.ErrNetworkTimeout), + }) + + reconciler := &CellReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcileMultiGatewayDeployment(context.Background(), cell) + if err == nil { + t.Error("reconcileMultiGatewayDeployment() should error on Get failure") + } +} + +// TestReconcileMultiGatewayService_GetError tests error path on Get MultiGateway Service (not NotFound). 
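+// A network timeout is injected for the "test-cell-multigateway" key, so the Get
+// fails with a non-NotFound error that the reconcile helper must propagate.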
+func TestReconcileMultiGatewayService_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + // Create client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cell). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-cell-multigateway", testutil.ErrNetworkTimeout), + }) + + reconciler := &CellReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcileMultiGatewayService(context.Background(), cell) + if err == nil { + t.Error("reconcileMultiGatewayService() should error on Get failure") + } +} + +// TestUpdateStatus_GetError tests error path on Get MultiGateway Deployment (not NotFound). +func TestUpdateStatus_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cell). + WithStatusSubresource(&multigresv1alpha1.Cell{}). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-cell-multigateway", testutil.ErrNetworkTimeout), + }) + + reconciler := &CellReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.updateStatus(context.Background(), cell) + if err == nil { + t.Error("updateStatus() should error on Get failure") + } +} + +// TestBuildConditions_ZeroReplicas tests buildConditions when deployments have zero replicas. 
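+// With zero replicas the readiness check (ReadyReplicas == Replicas && Replicas > 0)
+// fails, so the Ready condition must be False with reason MultiGatewayNotReady.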
+func TestBuildConditions_ZeroReplicas(t *testing.T) { + reconciler := &CellReconciler{} + + cell := &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + Generation: 1, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + } + + mgDeploy := &appsv1.Deployment{ + Status: appsv1.DeploymentStatus{ + Replicas: 0, + ReadyReplicas: 0, + }, + } + + conditions := reconciler.buildConditions(cell, mgDeploy) + + if len(conditions) == 0 { + t.Fatal("buildConditions() should return conditions") + } + + readyCondition := conditions[0] + if readyCondition.Type != "Ready" { + t.Errorf("Condition type = %s, want Ready", readyCondition.Type) + } + if readyCondition.Status != metav1.ConditionFalse { + t.Errorf("Condition status = %s, want False (zero replicas)", readyCondition.Status) + } + if readyCondition.Reason != "MultiGatewayNotReady" { + t.Errorf("Condition reason = %s, want MultiGatewayNotReady", readyCondition.Reason) + } +} diff --git a/pkg/resource-handler/controller/cell/cell_controller_test.go b/pkg/resource-handler/controller/cell/cell_controller_test.go new file mode 100644 index 00000000..81abdeaa --- /dev/null +++ b/pkg/resource-handler/controller/cell/cell_controller_test.go @@ -0,0 +1,689 @@ +package cell_test + +import ( + "slices" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/cell" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func TestCellReconciler_Reconcile(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + cell *multigresv1alpha1.Cell + existingObjects []client.Object + failureConfig *testutil.FailureConfig + wantErr bool + wantRequeue bool + assertFunc func(t *testing.T, c client.Client, cell *multigresv1alpha1.Cell) + }{ + ////---------------------------------------- + /// Success + //------------------------------------------ + "create all resources for new Cell": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, cell *multigresv1alpha1.Cell) { + // Verify MultiGateway Deployment was created + mgDeploy := &appsv1.Deployment{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-cell-multigateway", Namespace: "default"}, + mgDeploy); err != nil { + t.Errorf("MultiGateway Deployment should exist: %v", err) + } + + // Verify MultiGateway Service was created + mgSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-cell-multigateway", Namespace: "default"}, + mgSvc); err != nil { + t.Errorf("MultiGateway Service should exist: %v", err) + } + + // Verify defaults + if *mgDeploy.Spec.Replicas != int32(2) { + t.Errorf( + "MultiGateway Deployment replicas = %d, want %d", + *mgDeploy.Spec.Replicas, + int32(2), + ) + } + + // Verify finalizer was 
added + updatedCell := &multigresv1alpha1.Cell{} + if err := c.Get(t.Context(), types.NamespacedName{Name: "test-cell", Namespace: "default"}, updatedCell); err != nil { + t.Fatalf("Failed to get Cell: %v", err) + } + if !slices.Contains(updatedCell.Finalizers, "cell.multigres.com/finalizer") { + t.Errorf("Finalizer should be added") + } + }, + }, + "update existing resources": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cell", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone2", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(5)), + }, + Images: multigresv1alpha1.CellImagesSpec{ + MultiGateway: "custom/multigateway:v1.0.0", + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cell-multigateway", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), // will be updated to 5 + }, + Status: appsv1.DeploymentStatus{ + Replicas: 2, + ReadyReplicas: 2, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cell-multigateway", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, cell *multigresv1alpha1.Cell) { + mgDeploy := &appsv1.Deployment{} + err := c.Get(t.Context(), types.NamespacedName{ + Name: "existing-cell-multigateway", + Namespace: "default", + }, mgDeploy) + if err != nil { + t.Fatalf("Failed to get MultiGateway Deployment: %v", err) + } + + if *mgDeploy.Spec.Replicas != 5 { + t.Errorf( + "MultiGateway Deployment replicas = %d, want 5", + *mgDeploy.Spec.Replicas, + ) + } + + if mgDeploy.Spec.Template.Spec.Containers[0].Image != "custom/multigateway:v1.0.0" { + t.Errorf( + "MultiGateway image = %s, want custom/multigateway:v1.0.0", + mgDeploy.Spec.Template.Spec.Containers[0].Image, + ) + } + }, + }, + "deletion with finalizer": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone3", + }, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone3", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, cell *multigresv1alpha1.Cell) { + updatedCell := &multigresv1alpha1.Cell{} + err := c.Get(t.Context(), + types.NamespacedName{Name: "test-cell-deletion", Namespace: "default"}, + updatedCell) + if err == nil { + t.Errorf( + "Cell object should be deleted but still exists (finalizers: %v)", + updatedCell.Finalizers, + ) + } + }, + }, + "all replicas ready status": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-ready", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone4", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-ready-multigateway", + Namespace: "default", + }, + Spec: 
appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + }, + Status: appsv1.DeploymentStatus{ + Replicas: 2, + ReadyReplicas: 2, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-ready-multigateway", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, cell *multigresv1alpha1.Cell) { + updatedCell := &multigresv1alpha1.Cell{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-cell-ready", Namespace: "default"}, + updatedCell); err != nil { + t.Fatalf("Failed to get Cell: %v", err) + } + + if len(updatedCell.Status.Conditions) == 0 { + t.Error("Status.Conditions should not be empty") + } else { + readyCondition := updatedCell.Status.Conditions[0] + if readyCondition.Type != "Ready" { + t.Errorf("Condition type = %s, want Ready", readyCondition.Type) + } + if readyCondition.Status != metav1.ConditionTrue { + t.Errorf("Condition status = %s, want True", readyCondition.Status) + } + if readyCondition.Reason != "MultiGatewayReady" { + t.Errorf("Condition reason = %s, want MultiGatewayReady", readyCondition.Reason) + } + } + + if !slices.Contains(updatedCell.Finalizers, "cell.multigres.com/finalizer") { + t.Errorf("Finalizer should be present") + } + }, + }, + "not ready status - partial replicas": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-partial", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone5", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(3)), + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-partial-multigateway", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(3)), + }, + Status: appsv1.DeploymentStatus{ + Replicas: 3, + ReadyReplicas: 2, // not all ready + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-partial-multigateway", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, cell *multigresv1alpha1.Cell) { + updatedCell := &multigresv1alpha1.Cell{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-cell-partial", Namespace: "default"}, + updatedCell); err != nil { + t.Fatalf("Failed to get Cell: %v", err) + } + + if len(updatedCell.Status.Conditions) == 0 { + t.Fatal("Status.Conditions should not be empty") + } + + readyCondition := updatedCell.Status.Conditions[0] + if readyCondition.Type != "Ready" { + t.Errorf("Condition type = %s, want Ready", readyCondition.Type) + } + if readyCondition.Status != metav1.ConditionFalse { + t.Errorf("Condition status = %s, want False", readyCondition.Status) + } + if readyCondition.Reason != "MultiGatewayNotReady" { + t.Errorf( + "Condition reason = %s, want MultiGatewayNotReady", + readyCondition.Reason, + ) + } + }, + }, + ////---------------------------------------- + /// Error + //------------------------------------------ + "error on status update": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnStatusUpdate: testutil.FailOnObjectName("test-cell", testutil.ErrInjected), + }, + wantErr: true, + }, + "error on Get MultiGateway Deployment in updateStatus (network error)": { + cell: 
&multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-status", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-status-multigateway", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-status-multigateway", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + // Fail MultiGateway Deployment Get in updateStatus (after reconcile calls) + OnGet: testutil.FailKeyAfterNCalls(2, testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "error on MultiGateway Deployment create": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if deploy, ok := obj.(*appsv1.Deployment); ok && + deploy.Name == "test-cell-multigateway" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on MultiGateway Deployment Update": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(5)), + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-multigateway", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if deploy, ok := obj.(*appsv1.Deployment); ok && + deploy.Name == "test-cell-multigateway" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get MultiGateway Deployment (network error)": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + if key.Name == "test-cell-multigateway" { + return testutil.ErrNetworkTimeout + } + return nil + }, + }, + wantErr: true, + }, + "error on MultiGateway Service create": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && + svc.Name == "test-cell-multigateway" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on MultiGateway Service Update": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-cell-multigateway", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-multigateway", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && + svc.Name == "test-cell-multigateway" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get MultiGateway Service (network error)": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-svc", + Namespace: "default", + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-svc-multigateway", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnNamespacedKeyName( + "test-cell-svc-multigateway", + "default", + testutil.ErrNetworkTimeout, + ), + }, + wantErr: true, + }, + "error on finalizer Update": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-cell", testutil.ErrInjected), + }, + wantErr: true, + }, + "deletion error on finalizer removal": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"cell.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-cell-del", testutil.ErrInjected), + }, + wantErr: true, + }, + "error on Get Cell (network error)": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-cell", testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Create base fake client + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tc.existingObjects...). + WithStatusSubresource(&multigresv1alpha1.Cell{}). 
+ Build() + + fakeClient := client.Client(baseClient) + // Wrap with failure injection if configured + if tc.failureConfig != nil { + fakeClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) + } + + reconciler := &cell.CellReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Create the Cell resource if not in existing objects + cellInExisting := false + for _, obj := range tc.existingObjects { + if cell, ok := obj.(*multigresv1alpha1.Cell); ok && cell.Name == tc.cell.Name { + cellInExisting = true + break + } + } + if !cellInExisting { + err := fakeClient.Create(t.Context(), tc.cell) + if err != nil { + t.Fatalf("Failed to create Cell: %v", err) + } + } + + // Reconcile + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.cell.Name, + Namespace: tc.cell.Namespace, + }, + } + + result, err := reconciler.Reconcile(t.Context(), req) + if (err != nil) != tc.wantErr { + t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) + return + } + if tc.wantErr { + return + } + + // NOTE: Check for requeue delay when we need to support such setup. + _ = result + + // Run custom assertions if provided + if tc.assertFunc != nil { + tc.assertFunc(t, fakeClient, tc.cell) + } + }) + } +} + +func TestCellReconciler_ReconcileNotFound(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + reconciler := &cell.CellReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Reconcile non-existent resource + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "nonexistent-cell", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(t.Context(), req) + if err != nil { + t.Errorf("Reconcile() should not error on NotFound, got: %v", err) + } + if result.RequeueAfter > 0 { + t.Errorf("Reconcile() should not requeue on NotFound") + } +} diff --git a/pkg/resource-handler/controller/cell/integration_test.go b/pkg/resource-handler/controller/cell/integration_test.go new file mode 100644 index 00000000..f658a1c5 --- /dev/null +++ b/pkg/resource-handler/controller/cell/integration_test.go @@ -0,0 +1,488 @@ +//go:build integration +// +build integration + +package cell_test + +import ( + "path/filepath" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + cellcontroller "github.com/numtide/multigres-operator/pkg/resource-handler/controller/cell" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func TestSetupWithManager(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + if err := (&cellcontroller.CellReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr, controller.Options{ + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } +} + +func 
TestCellReconciliation(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + cell *multigresv1alpha1.Cell + existingObjects []client.Object + wantResources []client.Object + wantErr bool + assertFunc func(t *testing.T, c client.Client, cell *multigresv1alpha1.Cell) + }{ + "simple cell with default replicas": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + }, + MultiOrch: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRefSpec{ + ClientServiceName: "global-topo", + }, + TopoServer: multigresv1alpha1.CellTopoServerSpec{}, + }, + }, + wantResources: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "test-cell-multigateway", "multigateway", "zone1"), + OwnerReferences: cellOwnerRefs(t, "test-cell"), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + Selector: &metav1.LabelSelector{ + MatchLabels: cellLabels(t, "test-cell-multigateway", "multigateway", "zone1"), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: cellLabels(t, "test-cell-multigateway", "multigateway", "zone1"), + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: "numtide/multigres-operator:latest", + Ports: []corev1.ContainerPort{ + tcpPort(t, "http", 15100), + tcpPort(t, "grpc", 15170), + tcpPort(t, "postgres", 15432), + }, + }, + }, + }, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "test-cell-multigateway", "multigateway", "zone1"), + OwnerReferences: cellOwnerRefs(t, "test-cell"), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + tcpServicePort(t, "http", 15100), + tcpServicePort(t, "grpc", 15170), + tcpServicePort(t, "postgres", 15432), + }, + Selector: cellLabels(t, "test-cell-multigateway", "multigateway", "zone1"), + }, + }, + }, + }, + "cell with custom replicas": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-replicas-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone2", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(3)), + }, + MultiOrch: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(3)), + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRefSpec{ + ClientServiceName: "global-topo", + }, + TopoServer: multigresv1alpha1.CellTopoServerSpec{}, + }, + }, + wantResources: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-replicas-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "custom-replicas-cell-multigateway", "multigateway", "zone2"), + OwnerReferences: cellOwnerRefs(t, "custom-replicas-cell"), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(3)), + Selector: &metav1.LabelSelector{ + MatchLabels: cellLabels(t, "custom-replicas-cell-multigateway", "multigateway", "zone2"), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: 
cellLabels(t, "custom-replicas-cell-multigateway", "multigateway", "zone2"), + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: "numtide/multigres-operator:latest", + Ports: []corev1.ContainerPort{ + tcpPort(t, "http", 15100), + tcpPort(t, "grpc", 15170), + tcpPort(t, "postgres", 15432), + }, + }, + }, + }, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-replicas-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "custom-replicas-cell-multigateway", "multigateway", "zone2"), + OwnerReferences: cellOwnerRefs(t, "custom-replicas-cell"), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + tcpServicePort(t, "http", 15100), + tcpServicePort(t, "grpc", 15170), + tcpServicePort(t, "postgres", 15432), + }, + Selector: cellLabels(t, "custom-replicas-cell-multigateway", "multigateway", "zone2"), + }, + }, + }, + }, + "cell with custom images": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-images-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone3", + Images: multigresv1alpha1.CellImagesSpec{ + MultiGateway: "custom/multigateway:v1.0.0", + MultiOrch: "custom/multiorch:v1.0.0", + }, + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + }, + MultiOrch: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRefSpec{ + ClientServiceName: "global-topo", + }, + TopoServer: multigresv1alpha1.CellTopoServerSpec{}, + }, + }, + wantResources: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-images-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "custom-images-cell-multigateway", "multigateway", "zone3"), + OwnerReferences: cellOwnerRefs(t, "custom-images-cell"), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + Selector: &metav1.LabelSelector{ + MatchLabels: cellLabels(t, "custom-images-cell-multigateway", "multigateway", "zone3"), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: cellLabels(t, "custom-images-cell-multigateway", "multigateway", "zone3"), + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: "custom/multigateway:v1.0.0", + Ports: []corev1.ContainerPort{ + tcpPort(t, "http", 15100), + tcpPort(t, "grpc", 15170), + tcpPort(t, "postgres", 15432), + }, + }, + }, + }, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-images-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "custom-images-cell-multigateway", "multigateway", "zone3"), + OwnerReferences: cellOwnerRefs(t, "custom-images-cell"), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + tcpServicePort(t, "http", 15100), + tcpServicePort(t, "grpc", 15170), + tcpServicePort(t, "postgres", 15432), + }, + Selector: cellLabels(t, "custom-images-cell-multigateway", "multigateway", "zone3"), + }, + }, + }, + }, + "cell with affinity": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "affinity-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone4", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + 
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"gateway"}, + }, + }, + }, + }, + }, + }, + }, + }, + MultiOrch: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(2)), + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + { + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/component": "multiorch", + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + }, + GlobalTopoServer: multigresv1alpha1.GlobalTopoServerRefSpec{ + ClientServiceName: "global-topo", + }, + TopoServer: multigresv1alpha1.CellTopoServerSpec{}, + }, + }, + wantResources: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "affinity-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "affinity-cell-multigateway", "multigateway", "zone4"), + OwnerReferences: cellOwnerRefs(t, "affinity-cell"), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + Selector: &metav1.LabelSelector{ + MatchLabels: cellLabels(t, "affinity-cell-multigateway", "multigateway", "zone4"), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: cellLabels(t, "affinity-cell-multigateway", "multigateway", "zone4"), + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: "numtide/multigres-operator:latest", + Ports: []corev1.ContainerPort{ + tcpPort(t, "http", 15100), + tcpPort(t, "grpc", 15170), + tcpPort(t, "postgres", 15432), + }, + }, + }, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"gateway"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "affinity-cell-multigateway", + Namespace: "default", + Labels: cellLabels(t, "affinity-cell-multigateway", "multigateway", "zone4"), + OwnerReferences: cellOwnerRefs(t, "affinity-cell"), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + tcpServicePort(t, "http", 15100), + tcpServicePort(t, "grpc", 15170), + tcpServicePort(t, "postgres", 15432), + }, + Selector: cellLabels(t, "affinity-cell-multigateway", "multigateway", "zone4"), + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + ctx := t.Context() + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + watcher := testutil.NewResourceWatcher(t, ctx, mgr, + testutil.WithCmpOpts( + testutil.IgnoreMetaRuntimeFields(), + testutil.IgnoreServiceRuntimeFields(), + testutil.IgnoreDeploymentRuntimeFields(), + testutil.IgnorePodSpecDefaults(), + testutil.IgnoreDeploymentSpecDefaults(), + ), + testutil.WithExtraResource(&multigresv1alpha1.Cell{}), + ) + client := mgr.GetClient() + + cellReconciler := &cellcontroller.CellReconciler{ + Client: mgr.GetClient(), + Scheme: 
mgr.GetScheme(), + } + if err := cellReconciler.SetupWithManager(mgr, controller.Options{ + // Needed for the parallel test runs + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } + + if err := client.Create(ctx, tc.cell); err != nil { + t.Fatalf("Failed to create the initial item, %v", err) + } + + if err := watcher.WaitForMatch(tc.wantResources...); err != nil { + t.Errorf("Resources mismatch:\n%v", err) + } + }) + } +} + +// Test helpers + +// cellLabels returns standard labels for cell resources in tests +func cellLabels(t testing.TB, instanceName, component, cellName string) map[string]string { + t.Helper() + return map[string]string{ + "app.kubernetes.io/component": component, + "app.kubernetes.io/instance": instanceName, + "app.kubernetes.io/managed-by": "multigres-operator", + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/part-of": "multigres", + } +} + +// cellOwnerRefs returns owner references for a Cell resource +func cellOwnerRefs(t testing.TB, cellName string) []metav1.OwnerReference { + t.Helper() + return []metav1.OwnerReference{{ + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: cellName, + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }} +} + +// tcpPort creates a simple TCP container port +func tcpPort(t testing.TB, name string, port int32) corev1.ContainerPort { + t.Helper() + return corev1.ContainerPort{Name: name, ContainerPort: port, Protocol: corev1.ProtocolTCP} +} + +// tcpServicePort creates a TCP service port with named target +func tcpServicePort(t testing.TB, name string, port int32) corev1.ServicePort { + t.Helper() + return corev1.ServicePort{Name: name, Port: port, TargetPort: intstr.FromString(name), Protocol: corev1.ProtocolTCP} +} diff --git a/pkg/resource-handler/controller/cell/multigateway.go b/pkg/resource-handler/controller/cell/multigateway.go new file mode 100644 index 00000000..83cba7b1 --- /dev/null +++ b/pkg/resource-handler/controller/cell/multigateway.go @@ -0,0 +1,153 @@ +package cell + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" +) + +const ( + // MultiGatewayComponentName is the component label value for MultiGateway resources + MultiGatewayComponentName = "multigateway" + + // DefaultMultiGatewayReplicas is the default number of MultiGateway replicas + DefaultMultiGatewayReplicas int32 = 2 + + // DefaultMultiGatewayImage is the default MultiGateway container image + DefaultMultiGatewayImage = "numtide/multigres-operator:latest" + + // MultiGatewayHTTPPort is the default port for HTTP connections + MultiGatewayHTTPPort int32 = 15100 + + // MultiGatewayGRPCPort is the default port for GRPC connections + MultiGatewayGRPCPort int32 = 15170 + + // MultiGatewayPostgresPort is the default port for database connections + MultiGatewayPostgresPort int32 = 15432 +) + +// BuildMultiGatewayDeployment creates a Deployment for the MultiGateway component. 
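+// Replicas and image fall back to DefaultMultiGatewayReplicas and DefaultMultiGatewayImage
+// when not set on the Cell spec, and the Cell is set as the controller owner so the
+// Deployment is garbage-collected together with it.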
+func BuildMultiGatewayDeployment( + cell *multigresv1alpha1.Cell, + scheme *runtime.Scheme, +) (*appsv1.Deployment, error) { + replicas := DefaultMultiGatewayReplicas + if cell.Spec.MultiGateway.Replicas != nil { + replicas = *cell.Spec.MultiGateway.Replicas + } + + image := DefaultMultiGatewayImage + if cell.Spec.Images.MultiGateway != "" { + image = cell.Spec.Images.MultiGateway + } + + name := cell.Name + "-multigateway" + labels := metadata.BuildStandardLabels(name, MultiGatewayComponentName) + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cell.Namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: image, + Resources: cell.Spec.MultiGateway.ResourceRequirements, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: MultiGatewayHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: MultiGatewayGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: MultiGatewayPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Affinity: cell.Spec.MultiGateway.Affinity, + }, + }, + }, + } + + if err := ctrl.SetControllerReference(cell, deployment, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return deployment, nil +} + +// BuildMultiGatewayService creates a Service for the MultiGateway component. +func BuildMultiGatewayService( + cell *multigresv1alpha1.Cell, + scheme *runtime.Scheme, +) (*corev1.Service, error) { + name := cell.Name + "-multigateway" + labels := metadata.BuildStandardLabels(name, MultiGatewayComponentName) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cell.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: labels, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: MultiGatewayHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: MultiGatewayGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: MultiGatewayPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + if err := ctrl.SetControllerReference(cell, svc, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return svc, nil +} diff --git a/pkg/resource-handler/controller/cell/multigateway_test.go b/pkg/resource-handler/controller/cell/multigateway_test.go new file mode 100644 index 00000000..0a0fed59 --- /dev/null +++ b/pkg/resource-handler/controller/cell/multigateway_test.go @@ -0,0 +1,726 @@ +package cell + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildMultiGatewayDeployment(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + 
tests := map[string]struct { + cell *multigresv1alpha1.Cell + scheme *runtime.Scheme + want *appsv1.Deployment + wantErr bool + }{ + "minimal spec - all defaults": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + scheme: scheme, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-multigateway", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-cell-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: "test-cell", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(DefaultMultiGatewayReplicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-cell-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-cell-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: DefaultMultiGatewayImage, + Resources: corev1.ResourceRequirements{}, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: MultiGatewayHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: MultiGatewayGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: MultiGatewayPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "custom replicas": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cell-custom-replicas", + Namespace: "test-ns", + UID: "custom-uid", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone2", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Replicas: ptr.To(int32(5)), + }, + }, + }, + scheme: scheme, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cell-custom-replicas-multigateway", + Namespace: "test-ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-custom-replicas-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: "cell-custom-replicas", + UID: "custom-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(5)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-custom-replicas-multigateway", + 
"app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-custom-replicas-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: DefaultMultiGatewayImage, + Resources: corev1.ResourceRequirements{}, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: MultiGatewayHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: MultiGatewayGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: MultiGatewayPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "custom image": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cell-custom-image", + Namespace: "default", + UID: "image-uid", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone3", + Images: multigresv1alpha1.CellImagesSpec{ + MultiGateway: "custom/multigateway:v1.2.3", + }, + }, + }, + scheme: scheme, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cell-custom-image-multigateway", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-custom-image-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: "cell-custom-image", + UID: "image-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(DefaultMultiGatewayReplicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-custom-image-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-custom-image-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: "custom/multigateway:v1.2.3", + Resources: corev1.ResourceRequirements{}, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: MultiGatewayHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: MultiGatewayGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: MultiGatewayPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "with affinity": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cell-affinity", + Namespace: "default", + UID: "affinity-uid", + }, + Spec: 
multigresv1alpha1.CellSpec{ + Name: "zone4", + MultiGateway: multigresv1alpha1.StatelessSpec{ + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"gateway"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + scheme: scheme, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cell-affinity-multigateway", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-affinity-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: "cell-affinity", + UID: "affinity-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(DefaultMultiGatewayReplicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-affinity-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-affinity-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: DefaultMultiGatewayImage, + Resources: corev1.ResourceRequirements{}, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: MultiGatewayHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: MultiGatewayGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: MultiGatewayPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"gateway"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "with resource requirements": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cell-resources", + Namespace: "default", + UID: "resources-uid", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone5", + MultiGateway: multigresv1alpha1.StatelessSpec{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + }, + }, + }, + }, + scheme: scheme, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"cell-resources-multigateway", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-resources-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: "cell-resources", + UID: "resources-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(DefaultMultiGatewayReplicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-resources-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "cell-resources-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "multigateway", + Image: DefaultMultiGatewayImage, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: MultiGatewayHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: MultiGatewayGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: MultiGatewayPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "invalid scheme - should error": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + scheme: runtime.NewScheme(), // empty scheme + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildMultiGatewayDeployment(tc.cell, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildMultiGatewayDeployment() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildMultiGatewayDeployment() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildMultiGatewayService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + cell *multigresv1alpha1.Cell + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell-multigateway", + Namespace: "default", + Labels: map[string]string{ + 
"app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-cell-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: "test-cell", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-cell-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: MultiGatewayHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: MultiGatewayGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: MultiGatewayPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + "with different cell name": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-cell", + Namespace: "prod-ns", + UID: "prod-uid", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "us-west", + }, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-cell-multigateway", + Namespace: "prod-ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "production-cell-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Cell", + Name: "production-cell", + UID: "prod-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "production-cell-multigateway", + "app.kubernetes.io/component": "multigateway", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: MultiGatewayHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: MultiGatewayGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: MultiGatewayPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + "invalid scheme - should error": { + cell: &multigresv1alpha1.Cell{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cell", + Namespace: "default", + }, + Spec: multigresv1alpha1.CellSpec{ + Name: "zone1", + }, + }, + scheme: runtime.NewScheme(), // empty scheme + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildMultiGatewayService(tc.cell, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildMultiGatewayService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if 
diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildMultiGatewayService() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/etcd/service_test.go b/pkg/resource-handler/controller/etcd/service_test.go deleted file mode 100644 index 9a9ca6dd..00000000 --- a/pkg/resource-handler/controller/etcd/service_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package etcd - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" -) - -func TestBuildHeadlessService(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - - tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - scheme *runtime.Scheme - want *corev1.Service - wantErr bool - }{ - "minimal spec": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - UID: "test-uid", - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - scheme: scheme, - want: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-headless", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "test-etcd", - UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: corev1.ServiceSpec{ - ClusterIP: corev1.ClusterIPNone, - Selector: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - Ports: []corev1.ServicePort{ - { - Name: "client", - Port: 2379, - TargetPort: intstr.FromString("client"), - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - Port: 2380, - TargetPort: intstr.FromString("peer"), - Protocol: corev1.ProtocolTCP, - }, - }, - PublishNotReadyAddresses: true, - }, - }, - }, - "with cellName": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "etcd-zone1", - Namespace: "production", - UID: "zone1-uid", - }, - Spec: multigresv1alpha1.EtcdSpec{ - CellName: "zone1", - }, - }, - scheme: scheme, - want: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "etcd-zone1-headless", - Namespace: "production", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "etcd-zone1", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "zone1", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "etcd-zone1", - UID: "zone1-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: corev1.ServiceSpec{ - ClusterIP: corev1.ClusterIPNone, - Selector: map[string]string{ - "app.kubernetes.io/name": "multigres", - 
"app.kubernetes.io/instance": "etcd-zone1", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "zone1", - }, - Ports: []corev1.ServicePort{ - { - Name: "client", - Port: 2379, - TargetPort: intstr.FromString("client"), - Protocol: corev1.ProtocolTCP, - }, - { - Name: "peer", - Port: 2380, - TargetPort: intstr.FromString("peer"), - Protocol: corev1.ProtocolTCP, - }, - }, - PublishNotReadyAddresses: true, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got, err := BuildHeadlessService(tc.etcd, tc.scheme) - - if (err != nil) != tc.wantErr { - t.Errorf("BuildHeadlessService() error = %v, wantErr %v", err, tc.wantErr) - return - } - - if tc.wantErr { - return - } - - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("BuildHeadlessService() mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestBuildClientService(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - - tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - scheme *runtime.Scheme - want *corev1.Service - wantErr bool - }{ - "minimal spec": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - UID: "test-uid", - }, - Spec: multigresv1alpha1.EtcdSpec{}, - }, - scheme: scheme, - want: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "test-etcd", - UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - Selector: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - Ports: []corev1.ServicePort{ - { - Name: "client", - Port: 2379, - TargetPort: intstr.FromString("client"), - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, - }, - "with cellName": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "etcd-zone2", - Namespace: "production", - UID: "zone2-uid", - }, - Spec: multigresv1alpha1.EtcdSpec{ - CellName: "zone2", - }, - }, - scheme: scheme, - want: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "etcd-zone2", - Namespace: "production", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "etcd-zone2", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "zone2", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "etcd-zone2", - UID: "zone2-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, 
- Selector: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "etcd-zone2", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "zone2", - }, - Ports: []corev1.ServicePort{ - { - Name: "client", - Port: 2379, - TargetPort: intstr.FromString("client"), - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got, err := BuildClientService(tc.etcd, tc.scheme) - - if (err != nil) != tc.wantErr { - t.Errorf("BuildClientService() error = %v, wantErr %v", err, tc.wantErr) - return - } - - if tc.wantErr { - return - } - - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("BuildClientService() mismatch (-want +got):\n%s", diff) - } - }) - } -} diff --git a/pkg/resource-handler/controller/metadata/labels.go b/pkg/resource-handler/controller/metadata/labels.go index 74c7cb5b..c52dcfab 100644 --- a/pkg/resource-handler/controller/metadata/labels.go +++ b/pkg/resource-handler/controller/metadata/labels.go @@ -40,6 +40,18 @@ const ( // LabelMultigresCell identifies which cell a resource belongs to. LabelMultigresCell = "multigres.com/cell" + // LabelMultigresCluster identifies which cluster a resource belongs to. + LabelMultigresCluster = "multigres.com/cluster" + + // LabelMultigresShard identifies which shard a resource belongs to. + LabelMultigresShard = "multigres.com/shard" + + // LabelMultigresDatabase identifies which database a resource belongs to. + LabelMultigresDatabase = "multigres.com/database" + + // LabelMultigresTableGroup identifies which table group a resource belongs to. + LabelMultigresTableGroup = "multigres.com/tablegroup" + // DefaultCellName is the default cell name when none is specified. DefaultCellName = "multigres-global-topo" ) @@ -50,7 +62,6 @@ const ( // Parameters: // - resourceName: The name of the custom resource instance (e.g., "my-etcd-cluster") // - componentName: The component type (e.g., "etcd", "gateway", "orch", "pooler") -// - cellName: Optional cell name. If empty, no cell label is added. 
// // Standard labels include: // - app.kubernetes.io/name: "multigres" @@ -58,37 +69,59 @@ const ( // - app.kubernetes.io/component: // - app.kubernetes.io/part-of: "multigres" // - app.kubernetes.io/managed-by: "multigres-operator" -// - multigres.com/cell: (uses "multigres-global-topo" if empty) // // Example usage: // -// labels := BuildStandardLabels("my-etcd", "etcd", "cell-1") +// labels := BuildStandardLabels("my-etcd", "etcd") // // Returns: { // // "app.kubernetes.io/name": "multigres", // // "app.kubernetes.io/instance": "my-etcd", // // "app.kubernetes.io/component": "etcd", // // "app.kubernetes.io/part-of": "multigres", // // "app.kubernetes.io/managed-by": "multigres-operator", -// // "multigres.com/cell": "cell-1" // // } -func BuildStandardLabels(resourceName, componentName, cellName string) map[string]string { - // Use default cell name if not provided - if cellName == "" { - cellName = DefaultCellName - } - +func BuildStandardLabels(resourceName, componentName string) map[string]string { labels := map[string]string{ - LabelAppName: AppNameMultigres, - LabelAppInstance: resourceName, - LabelAppComponent: componentName, - LabelAppPartOf: AppNameMultigres, - LabelAppManagedBy: ManagedByMultigres, - LabelMultigresCell: cellName, + LabelAppName: AppNameMultigres, + LabelAppInstance: resourceName, + LabelAppComponent: componentName, + LabelAppPartOf: AppNameMultigres, + LabelAppManagedBy: ManagedByMultigres, } return labels } +// AddCellLabel adds the cell label to the provided labels map. +func AddCellLabel(labels map[string]string, cellName string) map[string]string { + labels[LabelMultigresCell] = cellName + return labels +} + +// AddClusterLabel adds the cluster label to the provided labels map. +func AddClusterLabel(labels map[string]string, clusterName string) map[string]string { + labels[LabelMultigresCluster] = clusterName + return labels +} + +// AddShardLabel adds the shard label to the provided labels map. +func AddShardLabel(labels map[string]string, shardName string) map[string]string { + labels[LabelMultigresShard] = shardName + return labels +} + +// AddDatabaseLabel adds the database label to the provided labels map. +func AddDatabaseLabel(labels map[string]string, databaseName string) map[string]string { + labels[LabelMultigresDatabase] = databaseName + return labels +} + +// AddTableGroupLabel adds the table group label to the provided labels map. +func AddTableGroupLabel(labels map[string]string, tableGroupName string) map[string]string { + labels[LabelMultigresTableGroup] = tableGroupName + return labels +} + // MergeLabels merges custom labels with standard labels. 
// // Note that standard labels take precedence over custom labels to prevent users diff --git a/pkg/resource-handler/controller/metadata/labels_test.go b/pkg/resource-handler/controller/metadata/labels_test.go index d0b2fa00..c59c8a3e 100644 --- a/pkg/resource-handler/controller/metadata/labels_test.go +++ b/pkg/resource-handler/controller/metadata/labels_test.go @@ -12,118 +12,35 @@ func TestBuildStandardLabels(t *testing.T) { tests := map[string]struct { resourceName string componentName string - cellName string want map[string]string }{ - "basic case with all parameters": { + "typical case": { resourceName: "my-etcd-cluster", componentName: "etcd", - cellName: "cell-1", want: map[string]string{ "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "my-etcd-cluster", "app.kubernetes.io/component": "etcd", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "cell-1", }, }, - "empty cellName uses default": { - resourceName: "my-gateway", - componentName: "gateway", - cellName: "", - want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-gateway", - "app.kubernetes.io/component": "gateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - "with cellName should add cell label": { - resourceName: "my-orch", - componentName: "orch", - cellName: "cell-alpha", - want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-orch", - "app.kubernetes.io/component": "orch", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "cell-alpha", - }, - }, - "empty resourceName": { - resourceName: "", - componentName: "pooler", - cellName: "cell-2", - want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "", - "app.kubernetes.io/component": "pooler", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "cell-2", - }, - }, - "empty componentName": { - resourceName: "my-resource", - componentName: "", - cellName: "cell-3", - want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-resource", - "app.kubernetes.io/component": "", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "cell-3", - }, - }, - "all empty strings - uses default cell": { + "empty strings allowed": { resourceName: "", componentName: "", - cellName: "", want: map[string]string{ "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "", "app.kubernetes.io/component": "", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - "special characters in names": { - resourceName: "my-resource-123", - componentName: "etcd-v3", - cellName: "cell-prod-1", - want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-resource-123", - "app.kubernetes.io/component": "etcd-v3", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "cell-prod-1", - }, - }, - "long names": { - resourceName: "very-long-resource-name-with-many-segments", - componentName: 
"gateway-proxy-component", - cellName: "cell-production-region-1", - want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "very-long-resource-name-with-many-segments", - "app.kubernetes.io/component": "gateway-proxy-component", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "cell-production-region-1", }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := metadata.BuildStandardLabels(tc.resourceName, tc.componentName, tc.cellName) + got := metadata.BuildStandardLabels(tc.resourceName, tc.componentName) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("BuildStandardLabels() mismatch (-want +got):\n%s", diff) } @@ -137,7 +54,7 @@ func TestMergeLabels(t *testing.T) { customLabels map[string]string want map[string]string }{ - "both maps populated - standard labels should win on conflicts": { + "standard labels win on conflicts": { standardLabels: map[string]string{ "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "my-resource", @@ -145,188 +62,261 @@ func TestMergeLabels(t *testing.T) { "app.kubernetes.io/managed-by": "multigres-operator", }, customLabels: map[string]string{ - "app.kubernetes.io/name": "user-app", // conflict: standard should win - "custom-label-1": "value1", - "custom-label-2": "value2", + "app.kubernetes.io/name": "user-app", // conflict + "app.kubernetes.io/component": "user-override", // conflict + "env": "production", // no conflict + "team": "platform", // no conflict }, want: map[string]string{ - "app.kubernetes.io/name": "multigres", // Standard wins + "app.kubernetes.io/name": "multigres", "app.kubernetes.io/instance": "my-resource", "app.kubernetes.io/component": "etcd", "app.kubernetes.io/managed-by": "multigres-operator", - "custom-label-1": "value1", - "custom-label-2": "value2", + "env": "production", + "team": "platform", }, }, - "standardLabels nil, customLabels populated": { + "nil maps handled correctly": { + standardLabels: nil, + customLabels: nil, + want: map[string]string{}, + }, + "only custom labels": { standardLabels: nil, customLabels: map[string]string{ - "custom-label-1": "value1", - "custom-label-2": "value2", + "env": "dev", + "team": "platform", }, want: map[string]string{ - "custom-label-1": "value1", - "custom-label-2": "value2", + "env": "dev", + "team": "platform", }, }, - "customLabels nil, standardLabels populated": { + "only standard labels": { standardLabels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-resource", "app.kubernetes.io/component": "gateway", }, customLabels: nil, want: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-resource", "app.kubernetes.io/component": "gateway", }, }, - "both maps nil": { - standardLabels: nil, - customLabels: nil, - want: map[string]string{}, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := metadata.MergeLabels(tc.standardLabels, tc.customLabels) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("MergeLabels() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestAddMultigresLabels(t *testing.T) { + tests := map[string]struct { + addFunc func(map[string]string, string) map[string]string + value string + key string + }{ + "AddCellLabel": { + addFunc: metadata.AddCellLabel, + value: "zone1", + key: "multigres.com/cell", }, - "overlapping keys - standard should override custom": { - 
standardLabels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "resource-1", - "app.kubernetes.io/component": "orch", - }, - customLabels: map[string]string{ - "app.kubernetes.io/instance": "user-defined-name", // conflict: standard should win - "app.kubernetes.io/component": "user-component", // conflict: standard should win - "user-label": "user-value", // no conflict: should be preserved + "AddClusterLabel": { + addFunc: metadata.AddClusterLabel, + value: "prod-cluster", + key: "multigres.com/cluster", + }, + "AddShardLabel": { + addFunc: metadata.AddShardLabel, + value: "shard-0", + key: "multigres.com/shard", + }, + "AddDatabaseLabel": { + addFunc: metadata.AddDatabaseLabel, + value: "proddb", + key: "multigres.com/database", + }, + "AddTableGroupLabel": { + addFunc: metadata.AddTableGroupLabel, + value: "orders", + key: "multigres.com/tablegroup", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + labels := map[string]string{ + "app.kubernetes.io/name": "multigres", + } + result := tc.addFunc(labels, tc.value) + + want := map[string]string{ + "app.kubernetes.io/name": "multigres", + tc.key: tc.value, + } + + if diff := cmp.Diff(want, result); diff != "" { + t.Errorf("%s mismatch (-want +got):\n%s", name, diff) + } + + // Verify it modified the original map + if labels[tc.key] != tc.value { + t.Errorf("%s should modify the original map", name) + } + }) + } +} + +func TestLabelOperations_ComplexScenarios(t *testing.T) { + tests := map[string]struct { + setupFunc func() map[string]string + want map[string]string + }{ + "build standard labels then add all multigres labels": { + setupFunc: func() map[string]string { + labels := metadata.BuildStandardLabels("my-shard", "shard-pool") + metadata.AddCellLabel(labels, "zone1") + metadata.AddClusterLabel(labels, "prod-cluster") + metadata.AddShardLabel(labels, "shard-0") + metadata.AddDatabaseLabel(labels, "proddb") + metadata.AddTableGroupLabel(labels, "orders") + return labels }, want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "resource-1", // Standard wins - "app.kubernetes.io/component": "orch", // Standard wins - "user-label": "user-value", // Custom preserved - }, - }, - "custom labels with keys not in standard - should be preserved": { - standardLabels: map[string]string{ "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "my-shard", + "app.kubernetes.io/component": "shard-pool", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", + "multigres.com/cell": "zone1", + "multigres.com/cluster": "prod-cluster", + "multigres.com/shard": "shard-0", + "multigres.com/database": "proddb", + "multigres.com/tablegroup": "orders", }, - customLabels: map[string]string{ - "env": "production", - "team": "platform", - "version": "v1.2.3", - "app.custom.io/x": "custom-value", + }, + "merge custom labels then add multigres labels": { + setupFunc: func() map[string]string { + standard := metadata.BuildStandardLabels("etcd-1", "etcd") + custom := map[string]string{ + "env": "production", + "team": "platform", + } + labels := metadata.MergeLabels(standard, custom) + metadata.AddCellLabel(labels, "zone2") + metadata.AddClusterLabel(labels, "main-cluster") + return labels }, want: map[string]string{ "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "etcd-1", + "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/part-of": "multigres", 
"app.kubernetes.io/managed-by": "multigres-operator", - "app.custom.io/x": "custom-value", + "multigres.com/cell": "zone2", + "multigres.com/cluster": "main-cluster", "env": "production", "team": "platform", - "version": "v1.2.3", }, }, - "empty standard labels map": { - standardLabels: map[string]string{}, - customLabels: map[string]string{ - "custom-label": "value", - }, - want: map[string]string{ - "custom-label": "value", - }, - }, - "empty custom labels map": { - standardLabels: map[string]string{ - "app.kubernetes.io/name": "multigres", + "chain all label operations for shard pool": { + setupFunc: func() map[string]string { + poolName := "shard-0-pool-primary" + labels := metadata.BuildStandardLabels(poolName, "shard-pool") + metadata.AddCellLabel(labels, "us-east-1a") + metadata.AddClusterLabel(labels, "production") + metadata.AddShardLabel(labels, "0") + metadata.AddDatabaseLabel(labels, "orders_db") + metadata.AddTableGroupLabel(labels, "orders_tg") + + custom := map[string]string{ + "monitoring": "prometheus", + "backup": "enabled", + } + return metadata.MergeLabels(labels, custom) }, - customLabels: map[string]string{}, want: map[string]string{ - "app.kubernetes.io/name": "multigres", - }, - }, - "both maps empty": { - standardLabels: map[string]string{}, - customLabels: map[string]string{}, - want: map[string]string{}, - }, - "all standard labels can override custom labels": { - standardLabels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "standard-instance", - "app.kubernetes.io/component": "standard-component", + "app.kubernetes.io/instance": "shard-0-pool-primary", + "app.kubernetes.io/component": "shard-pool", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "standard-cell", + "multigres.com/cell": "us-east-1a", + "multigres.com/cluster": "production", + "multigres.com/shard": "0", + "multigres.com/database": "orders_db", + "multigres.com/tablegroup": "orders_tg", + "monitoring": "prometheus", + "backup": "enabled", }, - customLabels: map[string]string{ - "app.kubernetes.io/name": "custom-app", // conflict: standard wins - "app.kubernetes.io/instance": "custom-instance", // conflict: standard wins - "app.kubernetes.io/component": "custom-component", // conflict: standard wins - "app.kubernetes.io/managed-by": "custom-operator", // conflict: standard wins - "multigres.com/cell": "custom-cell", // conflict: standard wins + }, + "merge with conflicting multigres labels - standard wins": { + setupFunc: func() map[string]string { + labels := metadata.BuildStandardLabels("resource", "component") + metadata.AddCellLabel(labels, "zone1") + metadata.AddShardLabel(labels, "shard-0") + + conflicting := map[string]string{ + "multigres.com/cell": "zone-override", + "multigres.com/shard": "shard-override", + "custom": "value", + } + return metadata.MergeLabels(labels, conflicting) }, want: map[string]string{ - "app.kubernetes.io/name": "multigres", // All standard values win - "app.kubernetes.io/instance": "standard-instance", - "app.kubernetes.io/component": "standard-component", + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "resource", + "app.kubernetes.io/component": "component", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "standard-cell", + "multigres.com/cell": "zone1", + "multigres.com/shard": "shard-0", + "custom": "value", }, }, - "part-of label conflict - 
standard wins": { - standardLabels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/part-of": "multigres", - }, - customLabels: map[string]string{ - "app.kubernetes.io/part-of": "custom-app", // conflict: standard should win - "custom-label": "value", + "add labels to empty map": { + setupFunc: func() map[string]string { + labels := make(map[string]string) + metadata.AddCellLabel(labels, "zone3") + metadata.AddShardLabel(labels, "shard-1") + metadata.AddDatabaseLabel(labels, "testdb") + return labels }, want: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/part-of": "multigres", // Standard wins - "custom-label": "value", + "multigres.com/cell": "zone3", + "multigres.com/shard": "shard-1", + "multigres.com/database": "testdb", }, }, - "complex scenario with many labels": { - standardLabels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-cluster", - "app.kubernetes.io/component": "pooler", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - }, - customLabels: map[string]string{ - "app.kubernetes.io/component": "user-override", // conflict: standard should win - "app.kubernetes.io/part-of": "custom-part", // conflict: standard should win - "app.custom.io/name": "custom-app", - "kubernetes.io/name": "k8s-name", - "monitoring": "enabled", - "backup": "true", - "tier": "critical", - "owner": "team-database", - "cost-center": "engineering", + "overwrite cell label multiple times - last wins": { + setupFunc: func() map[string]string { + labels := metadata.BuildStandardLabels("resource", "component") + metadata.AddCellLabel(labels, "zone1") + metadata.AddCellLabel(labels, "zone2") + metadata.AddCellLabel(labels, "zone3") + return labels }, want: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "my-cluster", - "app.kubernetes.io/component": "pooler", // Standard wins - "app.kubernetes.io/part-of": "multigres", // Standard wins + "app.kubernetes.io/instance": "resource", + "app.kubernetes.io/component": "component", + "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "app.custom.io/name": "custom-app", - "kubernetes.io/name": "k8s-name", - "monitoring": "enabled", - "backup": "true", - "tier": "critical", - "owner": "team-database", - "cost-center": "engineering", + "multigres.com/cell": "zone3", }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := metadata.MergeLabels(tc.standardLabels, tc.customLabels) + got := tc.setupFunc() if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("MergeLabels() mismatch (-want +got):\n%s", diff) + t.Errorf("Label operations mismatch (-want +got):\n%s", diff) } }) } diff --git a/pkg/resource-handler/controller/multigateway/container_env.go b/pkg/resource-handler/controller/multigateway/container_env.go deleted file mode 100644 index 12881adf..00000000 --- a/pkg/resource-handler/controller/multigateway/container_env.go +++ /dev/null @@ -1,34 +0,0 @@ -package multigateway - -import ( - corev1 "k8s.io/api/core/v1" -) - -// buildContainerEnv constructs all environment variables for the MultiGateway -// container. -func buildContainerEnv() []corev1.EnvVar { - envVars := []corev1.EnvVar{ - { - // TODO: get etcd endpoints and forward them to MultiGateway - Name: "ETCD_ENDPOINTS", - Value: "", - }, - { - // TODO: is there an env var for HTTP port? 
- Name: "HTTP_PORT", - Value: "", - }, - { - // TODO: is there an env var for GRPC port? - Name: "GRPC_PORT", - Value: "", - }, - { - // TODO: is there an env var for Postgres port? - Name: "POSTGRES_PORT", - Value: "", - }, - } - - return envVars -} diff --git a/pkg/resource-handler/controller/multigateway/deployment.go b/pkg/resource-handler/controller/multigateway/deployment.go deleted file mode 100644 index 194c1dc6..00000000 --- a/pkg/resource-handler/controller/multigateway/deployment.go +++ /dev/null @@ -1,89 +0,0 @@ -package multigateway - -import ( - "fmt" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" -) - -const ( - // ComponentName is the component label value for MultiGateway resources - ComponentName = "multigateway" - - // DefaultReplicas is the default number of MultiGateway replicas - DefaultReplicas int32 = 2 - - // DefaultImage is the default etcd container image - DefaultImage = "numtide/multigres-operator:latest" -) - -// BuildDeployment creates a Deployment for the Etcd cluster. -// Returns a deterministic Deployment based on the Etcd spec. -func BuildDeployment( - mg *multigresv1alpha1.MultiGateway, - scheme *runtime.Scheme, -) (*appsv1.Deployment, error) { - replicas := DefaultReplicas - // TODO: Debatable whether this defaulting makes sense. - if mg.Spec.Replicas != nil { - replicas = *mg.Spec.Replicas - } - - image := DefaultImage - if mg.Spec.Image != "" { - image = mg.Spec.Image - } - - labels := metadata.BuildStandardLabels(mg.Name, ComponentName, mg.Spec.CellName) - podLabels := metadata.MergeLabels(labels, mg.Spec.PodLabels) - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: mg.Name, - Namespace: mg.Namespace, - Labels: labels, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: podLabels, - Annotations: mg.Spec.PodAnnotations, - }, - Spec: corev1.PodSpec{ - ServiceAccountName: mg.Spec.ServiceAccountName, - ImagePullSecrets: mg.Spec.ImagePullSecrets, - Containers: []corev1.Container{ - { - Name: "multigateway", - Image: image, - Resources: mg.Spec.Resources, - Env: buildContainerEnv(), - Ports: buildContainerPorts(mg), - }, - }, - Affinity: mg.Spec.Affinity, - Tolerations: mg.Spec.Tolerations, - NodeSelector: mg.Spec.NodeSelector, - TopologySpreadConstraints: mg.Spec.TopologySpreadConstraints, - }, - }, - }, - } - - if err := ctrl.SetControllerReference(mg, deployment, scheme); err != nil { - return nil, fmt.Errorf("failed to set controller reference: %w", err) - } - - return deployment, nil -} diff --git a/pkg/resource-handler/controller/multigateway/deployment_test.go b/pkg/resource-handler/controller/multigateway/deployment_test.go deleted file mode 100644 index 87ddd262..00000000 --- a/pkg/resource-handler/controller/multigateway/deployment_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package multigateway - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" -) - -func 
int32Ptr(i int32) *int32 { - return &i -} - -func boolPtr(b bool) *bool { - return &b -} - -func TestBuildDeployment(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - - tests := map[string]struct { - mg *multigresv1alpha1.MultiGateway - scheme *runtime.Scheme - want *appsv1.Deployment - wantErr bool - }{ - "minimal spec - all defaults": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - UID: "test-uid", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - scheme: scheme, - want: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "MultiGateway", - Name: "test-multigateway", - UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: int32Ptr(2), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "multigateway", - Image: DefaultImage, - Resources: corev1.ResourceRequirements{}, - Env: buildContainerEnv(), - Ports: buildContainerPorts( - &multigresv1alpha1.MultiGateway{}, - ), - }, - }, - }, - }, - }, - }, - }, - "custom replicas and image": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - UID: "test-uid", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{ - Replicas: int32Ptr(3), - Image: "foo/bar:1.2.3", - }, - }, - scheme: scheme, - want: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "MultiGateway", - Name: "test-multigateway", - UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: int32Ptr(3), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/name": "multigres", - 
"app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "multigateway", - Image: "foo/bar:1.2.3", - Resources: corev1.ResourceRequirements{}, - Env: buildContainerEnv(), - Ports: buildContainerPorts( - &multigresv1alpha1.MultiGateway{}, - ), - }, - }, - }, - }, - }, - }, - }, - "scheme with incorrect type - should error": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - scheme: runtime.NewScheme(), // empty scheme with incorrect type - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got, err := BuildDeployment(tc.mg, tc.scheme) - - if (err != nil) != tc.wantErr { - t.Errorf("BuildDeployment() error = %v, wantErr %v", err, tc.wantErr) - return - } - - if tc.wantErr { - return - } - - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("BuildDeployment() mismatch (-want +got):\n%s", diff) - } - }) - } -} diff --git a/pkg/resource-handler/controller/multigateway/multigateway_controller.go b/pkg/resource-handler/controller/multigateway/multigateway_controller.go deleted file mode 100644 index 49f9d792..00000000 --- a/pkg/resource-handler/controller/multigateway/multigateway_controller.go +++ /dev/null @@ -1,247 +0,0 @@ -package multigateway - -import ( - "context" - "fmt" - "slices" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" -) - -const ( - finalizerName = "multigateway.multigres.com/finalizer" -) - -// MultiGatewayReconciler reconciles an MultiGateway object. -type MultiGatewayReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=multigres.com,resources=multigateways,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=multigres.com,resources=multigateways/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=multigres.com,resources=multigateways/finalizers,verbs=update -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete - -// Reconcile handles MultiGateway resource reconciliation. 
-func (r *MultiGatewayReconciler) Reconcile( - ctx context.Context, - req ctrl.Request, -) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Fetch the MultiGateway instance - mg := &multigresv1alpha1.MultiGateway{} - if err := r.Get(ctx, req.NamespacedName, mg); err != nil { - if errors.IsNotFound(err) { - logger.Info("MultiGateway resource not found, ignoring") - return ctrl.Result{}, nil - } - logger.Error(err, "Failed to get MultiGateway") - return ctrl.Result{}, err - } - - // Handle deletion - if !mg.DeletionTimestamp.IsZero() { - return r.handleDeletion(ctx, mg) - } - - // Add finalizer if not present - if !slices.Contains(mg.Finalizers, finalizerName) { - mg.Finalizers = append(mg.Finalizers, finalizerName) - if err := r.Update(ctx, mg); err != nil { - logger.Error(err, "Failed to add finalizer") - return ctrl.Result{}, err - } - } - - // Reconcile StatefulSet - if err := r.reconcileDeployment(ctx, mg); err != nil { - logger.Error(err, "Failed to reconcile Deployment") - return ctrl.Result{}, err - } - - // Reconcile Service - if err := r.reconcileService(ctx, mg); err != nil { - logger.Error(err, "Failed to reconcile client Service") - return ctrl.Result{}, err - } - - // Update status - if err := r.updateStatus(ctx, mg); err != nil { - logger.Error(err, "Failed to update status") - return ctrl.Result{}, err - } - - return ctrl.Result{}, nil -} - -// handleDeletion handles cleanup when MultiGateway is being deleted. -func (r *MultiGatewayReconciler) handleDeletion( - ctx context.Context, - mg *multigresv1alpha1.MultiGateway, -) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - if slices.Contains(mg.Finalizers, finalizerName) { - // Perform cleanup if needed - // Currently no special cleanup required - owner references handle resource deletion - - // Remove finalizer - mg.Finalizers = slices.DeleteFunc(mg.Finalizers, func(s string) bool { - return s == finalizerName - }) - if err := r.Update(ctx, mg); err != nil { - logger.Error(err, "Failed to remove finalizer") - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, nil -} - -// reconcileDeployment creates or updates the Deployment for MultiGateway. -func (r *MultiGatewayReconciler) reconcileDeployment( - ctx context.Context, - mg *multigresv1alpha1.MultiGateway, -) error { - desired, err := BuildDeployment(mg, r.Scheme) - if err != nil { - return fmt.Errorf("failed to build Deployment: %w", err) - } - - existing := &appsv1.Deployment{} - err = r.Get(ctx, client.ObjectKey{Namespace: mg.Namespace, Name: mg.Name}, existing) - if err != nil { - if errors.IsNotFound(err) { - // Create new Deployment - if err := r.Create(ctx, desired); err != nil { - return fmt.Errorf("failed to create Deployment: %w", err) - } - return nil - } - return fmt.Errorf("failed to get Deployment: %w", err) - } - - // Update existing Deployment - existing.Spec = desired.Spec - existing.Labels = desired.Labels - if err := r.Update(ctx, existing); err != nil { - return fmt.Errorf("failed to update Deployment: %w", err) - } - - return nil -} - -// reconcileService creates or updates the client Service for MultiGateway. 
-func (r *MultiGatewayReconciler) reconcileService( - ctx context.Context, - mg *multigresv1alpha1.MultiGateway, -) error { - desired, err := BuildService(mg, r.Scheme) - if err != nil { - return fmt.Errorf("failed to build Service: %w", err) - } - - existing := &corev1.Service{} - err = r.Get(ctx, client.ObjectKey{Namespace: mg.Namespace, Name: mg.Name}, existing) - if err != nil { - if errors.IsNotFound(err) { - // Create new Service - if err := r.Create(ctx, desired); err != nil { - return fmt.Errorf("failed to create Service: %w", err) - } - return nil - } - return fmt.Errorf("failed to get Service: %w", err) - } - - // Update existing Service - existing.Spec.Ports = desired.Spec.Ports - existing.Spec.Selector = desired.Spec.Selector - existing.Labels = desired.Labels - if err := r.Update(ctx, existing); err != nil { - return fmt.Errorf("failed to update Service: %w", err) - } - - return nil -} - -// updateStatus updates the Etcd status based on observed state. -func (r *MultiGatewayReconciler) updateStatus( - ctx context.Context, - mg *multigresv1alpha1.MultiGateway, -) error { - // Get the Deployment to check status - dp := &appsv1.Deployment{} - err := r.Get(ctx, client.ObjectKey{Namespace: mg.Namespace, Name: mg.Name}, dp) - if err != nil { - if errors.IsNotFound(err) { - // Deployment not created yet - return nil - } - return fmt.Errorf("failed to get Deployment for status: %w", err) - } - - // Update status fields - mg.Status.Replicas = dp.Status.Replicas - mg.Status.ReadyReplicas = dp.Status.ReadyReplicas - mg.Status.Ready = dp.Status.ReadyReplicas == dp.Status.Replicas && dp.Status.Replicas > 0 - mg.Status.ObservedGeneration = mg.Generation - - // Update conditions - mg.Status.Conditions = r.buildConditions(mg, dp) - - if err := r.Status().Update(ctx, mg); err != nil { - return fmt.Errorf("failed to update status: %w", err) - } - - return nil -} - -// buildConditions creates status conditions based on observed state. -func (r *MultiGatewayReconciler) buildConditions( - mg *multigresv1alpha1.MultiGateway, - sts *appsv1.Deployment, -) []metav1.Condition { - conditions := []metav1.Condition{} - - // Ready condition - readyCondition := metav1.Condition{ - Type: "Ready", - ObservedGeneration: mg.Generation, - LastTransitionTime: metav1.Now(), - } - - if sts.Status.ReadyReplicas == sts.Status.Replicas && sts.Status.Replicas > 0 { - readyCondition.Status = metav1.ConditionTrue - readyCondition.Reason = "AllReplicasReady" - readyCondition.Message = fmt.Sprintf("All %d replicas are ready", sts.Status.ReadyReplicas) - } else { - readyCondition.Status = metav1.ConditionFalse - readyCondition.Reason = "NotAllReplicasReady" - readyCondition.Message = fmt.Sprintf("%d/%d replicas ready", sts.Status.ReadyReplicas, sts.Status.Replicas) - } - - conditions = append(conditions, readyCondition) - return conditions -} - -// SetupWithManager sets up the controller with the Manager. -func (r *MultiGatewayReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&multigresv1alpha1.MultiGateway{}). - Owns(&appsv1.Deployment{}). - Owns(&corev1.Service{}). 
- Complete(r) -} diff --git a/pkg/resource-handler/controller/multigateway/multigateway_controller_internal_test.go b/pkg/resource-handler/controller/multigateway/multigateway_controller_internal_test.go deleted file mode 100644 index e05f7ff4..00000000 --- a/pkg/resource-handler/controller/multigateway/multigateway_controller_internal_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package multigateway - -import ( - "context" - "testing" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" -) - -// TestReconcileDeployment_InvalidScheme tests the error path when BuildDeployment fails. -// This should never happen in production - scheme is properly set up in main.go. -// Test exists for coverage of defensive error handling. -func TestReconcileDeployment_InvalidScheme(t *testing.T) { - // Empty scheme without Etcd type registered - invalidScheme := runtime.NewScheme() - - mg := &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(invalidScheme). - Build() - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: invalidScheme, - } - - err := reconciler.reconcileDeployment(context.Background(), mg) - if err == nil { - t.Error("reconcileDeployment() should error with invalid scheme") - } -} - -// TestReconcileService_InvalidScheme tests the error path when BuildClientService fails. -func TestReconcileService_InvalidScheme(t *testing.T) { - invalidScheme := runtime.NewScheme() - - mg := &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(invalidScheme). - Build() - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: invalidScheme, - } - - err := reconciler.reconcileService(context.Background(), mg) - if err == nil { - t.Error("reconcileService() should error with invalid scheme") - } -} - -// TestUpdateStatus_DeploymentNotFound tests the NotFound path in updateStatus. -func TestUpdateStatus_DeploymentNotFound(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) // Need Deployment type registered for Get to work - - mg := &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(mg). - WithStatusSubresource(&multigresv1alpha1.MultiGateway{}). - Build() - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - // Call updateStatus when Deployment doesn't exist yet - err := reconciler.updateStatus(context.Background(), mg) - if err != nil { - t.Errorf("updateStatus() should not error when Deployment not found, got: %v", err) - } -} - -// TestHandleDeletion_NoFinalizer tests early return when no finalizer is present. 
-func TestHandleDeletion_NoFinalizer(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - - mg := &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - Finalizers: []string{}, // No finalizer - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - } - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(mg). - Build() - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - result, err := reconciler.handleDeletion(context.Background(), mg) - if err != nil { - t.Errorf("handleDeletion() should not error when no finalizer, got: %v", err) - } - if result.RequeueAfter > 0 { - t.Error("handleDeletion() should not requeue when no finalizer") - } -} - -// TestReconcileService_GetError tests error path on Get client Service (not NotFound). -func TestReconcileService_GetError(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - - mg := &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - } - - // Create client with failure injection - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(mg). - Build() - - fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ - OnGet: testutil.FailOnKeyName("test-multigateway", testutil.ErrNetworkTimeout), - }) - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - err := reconciler.reconcileService(context.Background(), mg) - if err == nil { - t.Error("reconcileService() should error on Get failure") - } -} - -// TestUpdateStatus_GetError tests error path on Get StatefulSet (not NotFound). -func TestUpdateStatus_GetError(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - - mg := &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - } - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(mg). - WithStatusSubresource(&multigresv1alpha1.MultiGateway{}). 
- Build() - - fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ - OnGet: testutil.FailOnKeyName("test-multigateway", testutil.ErrNetworkTimeout), - }) - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - err := reconciler.updateStatus(context.Background(), mg) - if err == nil { - t.Error("updateStatus() should error on Get failure") - } -} diff --git a/pkg/resource-handler/controller/multigateway/multigateway_controller_test.go b/pkg/resource-handler/controller/multigateway/multigateway_controller_test.go deleted file mode 100644 index fe257bbe..00000000 --- a/pkg/resource-handler/controller/multigateway/multigateway_controller_test.go +++ /dev/null @@ -1,643 +0,0 @@ -package multigateway - -import ( - "slices" - "testing" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" -) - -func TestMultiGatewayReconciler_Reconcile(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - - tests := map[string]struct { - mg *multigresv1alpha1.MultiGateway - existingObjects []client.Object - failureConfig *testutil.FailureConfig - // TODO: If wantErr is false but failureConfig is set, assertions may fail - // due to failure injection. This should be addressed when we need to test - // partial failures that don't prevent reconciliation success. 
- wantErr bool - wantRequeue bool - assertFunc func(t *testing.T, c client.Client, mg *multigresv1alpha1.MultiGateway) - }{ - ////---------------------------------------- - /// Success - //------------------------------------------ - "create all resources for new MultiGateway": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{}, - assertFunc: func(t *testing.T, c client.Client, mg *multigresv1alpha1.MultiGateway) { - // Verify all three resources were created - sts := &appsv1.Deployment{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: "test-multigateway", Namespace: "default"}, - sts); err != nil { - t.Errorf("Deployment should exist: %v", err) - } - - svc := &corev1.Service{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: "test-multigateway", Namespace: "default"}, - svc); err != nil { - t.Errorf("Service should exist: %v", err) - } - - // Verify defaults and finalizer - if *sts.Spec.Replicas != DefaultReplicas { - t.Errorf( - "Deployment replicas = %d, want %d", - *sts.Spec.Replicas, - DefaultReplicas, - ) - } - - updatedMultiGateway := &multigresv1alpha1.MultiGateway{} - if err := c.Get(t.Context(), types.NamespacedName{Name: "test-multigateway", Namespace: "default"}, updatedMultiGateway); err != nil { - t.Fatalf("Failed to get MultiGateway: %v", err) - } - if !slices.Contains(updatedMultiGateway.Finalizers, finalizerName) { - t.Errorf("Finalizer should be added") - } - }, - }, - "update existing resources": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "existing-multigateway", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{ - Replicas: int32Ptr(5), - Image: "foo/bar:1.2.3", - }, - }, - existingObjects: []client.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "existing-multigateway", - Namespace: "default", - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: int32Ptr(3), // will be updated to 5 - }, - Status: appsv1.StatefulSetStatus{ - Replicas: 3, - ReadyReplicas: 3, - }, - }, - &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "existing-multigateway-headless", - Namespace: "default", - }, - }, - &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "existing-multigateway", - Namespace: "default", - }, - }, - }, - assertFunc: func(t *testing.T, c client.Client, mg *multigresv1alpha1.MultiGateway) { - dp := &appsv1.Deployment{} - err := c.Get(t.Context(), types.NamespacedName{ - Name: "existing-multigateway", - Namespace: "default", - }, dp) - if err != nil { - t.Fatalf("Failed to get Deployment: %v", err) - } - - if *dp.Spec.Replicas != 5 { - t.Errorf("Deployment replicas = %d, want 5", *dp.Spec.Replicas) - } - - if dp.Spec.Template.Spec.Containers[0].Image != "foo/bar:1.2.3" { - t.Errorf( - "Deployment image = %s, want foo/bar:1.2.3", - dp.Spec.Template.Spec.Containers[0].Image, - ) - } - }, - }, - "MultiGateway with cellName": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "multigateway-zone1", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{ - CellName: "zone1", - }, - }, - existingObjects: []client.Object{}, - assertFunc: func(t *testing.T, c client.Client, mg *multigresv1alpha1.MultiGateway) { - dp := &appsv1.Deployment{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: 
"multigateway-zone1", Namespace: "default"}, - dp); err != nil { - t.Fatalf("Failed to get Deployment: %v", err) - } - if dp.Labels["multigres.com/cell"] != "zone1" { - t.Errorf( - "Deployment cell label = %s, want zone1", - dp.Labels["multigres.com/cell"], - ) - } - - svc := &corev1.Service{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: "multigateway-zone1", Namespace: "default"}, - svc); err != nil { - t.Fatalf("Failed to get Service: %v", err) - } - if svc.Labels["multigres.com/cell"] != "zone1" { - t.Errorf( - "Service cell label = %s, want zone1", - svc.Labels["multigres.com/cell"], - ) - } - }, - }, - "deletion with finalizer": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-deletion", - Namespace: "default", - DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{ - &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-deletion", - Namespace: "default", - DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - }, - assertFunc: func(t *testing.T, c client.Client, multigateway *multigresv1alpha1.MultiGateway) { - updatedMultiGateway := &multigresv1alpha1.MultiGateway{} - err := c.Get(t.Context(), - types.NamespacedName{Name: "test-multigateway-deletion", Namespace: "default"}, - updatedMultiGateway) - if err == nil { - t.Errorf( - "MultiGateway object should be deleted but still exists (finalizers: %v)", - updatedMultiGateway.Finalizers, - ) - } - }, - }, - "all replicas ready status": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-ready", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{ - Replicas: int32Ptr(3), - }, - }, - existingObjects: []client.Object{ - &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-ready", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Replicas: int32Ptr(3), - }, - Status: appsv1.DeploymentStatus{ - Replicas: 3, - ReadyReplicas: 3, - }, - }, - }, - assertFunc: func(t *testing.T, c client.Client, multigateway *multigresv1alpha1.MultiGateway) { - updatedMultiGateway := &multigresv1alpha1.MultiGateway{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: "test-multigateway-ready", Namespace: "default"}, - updatedMultiGateway); err != nil { - t.Fatalf("Failed to get MultiGateway: %v", err) - } - - if !updatedMultiGateway.Status.Ready { - t.Error("Status.Ready should be true") - } - if updatedMultiGateway.Status.Replicas != 3 { - t.Errorf("Status.Replicas = %d, want 3", updatedMultiGateway.Status.Replicas) - } - if updatedMultiGateway.Status.ReadyReplicas != 3 { - t.Errorf( - "Status.ReadyReplicas = %d, want 3", - updatedMultiGateway.Status.ReadyReplicas, - ) - } - if len(updatedMultiGateway.Status.Conditions) == 0 { - t.Error("Status.Conditions should not be empty") - } else { - readyCondition := updatedMultiGateway.Status.Conditions[0] - if readyCondition.Type != "Ready" { - t.Errorf("Condition type = %s, want Ready", readyCondition.Type) - } - if readyCondition.Status != metav1.ConditionTrue { - t.Errorf("Condition status = %s, want True", readyCondition.Status) - } - } - - if !slices.Contains(updatedMultiGateway.Finalizers, finalizerName) { - 
t.Errorf("Finalizer should be present") - } - }, - }, - ////---------------------------------------- - /// Error - //------------------------------------------ - "error on status update": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{}, - failureConfig: &testutil.FailureConfig{ - OnStatusUpdate: testutil.FailOnObjectName( - "test-multigateway", - testutil.ErrInjected, - ), - }, - wantErr: true, - }, - "error on Get Deployment in updateStatus (network error)": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-status", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{ - &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-status", - Namespace: "default", - }, - }, - }, - failureConfig: &testutil.FailureConfig{ - // Fail Deployment Get after first successful call - // First Get succeeds (in reconcileDeployment) - // Second Get fails (in updateStatus) - OnGet: testutil.FailKeyAfterNCalls(1, testutil.ErrNetworkTimeout), - }, - wantErr: true, - }, - "error on Service create": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{}, - failureConfig: &testutil.FailureConfig{ - OnCreate: func(obj client.Object) error { - if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-multigateway" { - return testutil.ErrPermissionError - } - return nil - }, - }, - wantErr: true, - }, - "error on Service Update": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - }, - &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-headless", - Namespace: "default", - }, - }, - &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - }, - }, - failureConfig: &testutil.FailureConfig{ - OnUpdate: func(obj client.Object) error { - if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-multigateway" { - return testutil.ErrInjected - } - return nil - }, - }, - wantErr: true, - }, - "error on Get Service (network error)": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-svc", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-svc", - Namespace: "default", - }, - }, - &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-svc-headless", - Namespace: "default", - }, - }, - }, - failureConfig: &testutil.FailureConfig{ - OnGet: testutil.FailOnNamespacedKeyName( - "test-multigateway-svc", - "default", - testutil.ErrNetworkTimeout, - ), - }, - wantErr: true, - }, - "error on Deployment create": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{}, - failureConfig: &testutil.FailureConfig{ - OnCreate: func(obj client.Object) error { - if _, ok := obj.(*appsv1.Deployment); ok { - return testutil.ErrPermissionError - } - return nil - }, - }, - wantErr: true, - }, - "error on Deployment Update": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{ - Replicas: int32Ptr(5), - }, - }, - existingObjects: []client.Object{ - &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Replicas: int32Ptr(3), - }, - }, - }, - failureConfig: &testutil.FailureConfig{ - OnUpdate: func(obj client.Object) error { - if _, ok := obj.(*appsv1.Deployment); ok { - return testutil.ErrInjected - } - return nil - }, - }, - wantErr: true, - }, - "error on Get Deployment (network error)": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{}, - failureConfig: &testutil.FailureConfig{ - OnGet: func(key client.ObjectKey) error { - if key.Name == "test-multigateway" { - return testutil.ErrNetworkTimeout - } - return nil - }, - }, - wantErr: true, - }, - "error on finalizer Update": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{}, - failureConfig: &testutil.FailureConfig{ - OnUpdate: testutil.FailOnObjectName("test-multigateway", testutil.ErrInjected), - }, - wantErr: true, - }, - "deletion error on finalizer removal": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-del", - Namespace: "default", - DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{ - &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway-del", - Namespace: "default", - DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - }, - failureConfig: &testutil.FailureConfig{ - OnUpdate: testutil.FailOnObjectName("test-multigateway-del", testutil.ErrInjected), - }, - wantErr: true, - }, - "error on Get MultiGateway (network error)": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - existingObjects: []client.Object{}, - failureConfig: &testutil.FailureConfig{ - OnGet: testutil.FailOnKeyName("test-multigateway", testutil.ErrNetworkTimeout), - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - // Create base fake client - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(tc.existingObjects...). - WithStatusSubresource(&multigresv1alpha1.MultiGateway{}). 
- Build() - - fakeClient := client.Client(baseClient) - // Wrap with failure injection if configured - if tc.failureConfig != nil { - fakeClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) - } - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - // Create the MultiGateway resource if not in existing objects - mgInExisting := false - for _, obj := range tc.existingObjects { - if mg, ok := obj.(*multigresv1alpha1.MultiGateway); ok && mg.Name == tc.mg.Name { - mgInExisting = true - break - } - } - if !mgInExisting { - err := fakeClient.Create(t.Context(), tc.mg) - if err != nil { - t.Fatalf("Failed to create MultiGateway: %v", err) - } - } - - // Reconcile - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: tc.mg.Name, - Namespace: tc.mg.Namespace, - }, - } - - result, err := reconciler.Reconcile(t.Context(), req) - if (err != nil) != tc.wantErr { - t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) - return - } - if tc.wantErr { - return - } - - // NOTE: Check for requeue delay when we need to support such setup. - _ = result - // // Check requeue - // if (result.RequeueAfter != 0) != tc.wantRequeue { - // t.Errorf("Reconcile() result.Requeue = %v, want %v", result.RequeueAfter, tc.wantRequeue) - // } - - // Run custom assertions if provided - if tc.assertFunc != nil { - tc.assertFunc(t, fakeClient, tc.mg) - } - }) - } -} - -func TestMultiGatewayReconciler_ReconcileNotFound(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - Build() - - reconciler := &MultiGatewayReconciler{ - Client: fakeClient, - Scheme: scheme, - } - - // Reconcile non-existent resource - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "nonexistent-multigateway", - Namespace: "default", - }, - } - - result, err := reconciler.Reconcile(t.Context(), req) - if err != nil { - t.Errorf("Reconcile() should not error on NotFound, got: %v", err) - } - if result.RequeueAfter > 0 { - t.Errorf("Reconcile() should not requeue on NotFound") - } -} diff --git a/pkg/resource-handler/controller/multigateway/ports.go b/pkg/resource-handler/controller/multigateway/ports.go deleted file mode 100644 index 25ecf271..00000000 --- a/pkg/resource-handler/controller/multigateway/ports.go +++ /dev/null @@ -1,94 +0,0 @@ -package multigateway - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" -) - -const ( - // HTTPPort is the default port for HTTP connections. - HTTPPort int32 = 15100 - - // GRPCPort is the default port for GRPC connections. - GRPCPort int32 = 15170 - - // PostgresPort is the default port for database connections. - PostgresPort int32 = 15432 -) - -// buildContainerPorts creates the port definitions for the etcd container. -// Uses default ports since MultiGatewaySpec doesn't have port configuration yet. 
-func buildContainerPorts(mg *multigresv1alpha1.MultiGateway) []corev1.ContainerPort { - httpPort := HTTPPort - grpcPort := GRPCPort - postgresPort := PostgresPort - - if mg.Spec.HTTPPort != 0 { - httpPort = mg.Spec.HTTPPort - } - if mg.Spec.GRPCPort != 0 { - grpcPort = mg.Spec.GRPCPort - } - if mg.Spec.PostgresPort != 0 { - postgresPort = mg.Spec.PostgresPort - } - - return []corev1.ContainerPort{ - { - Name: "http", - ContainerPort: httpPort, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "grpc", - ContainerPort: grpcPort, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "postgres", - ContainerPort: postgresPort, - Protocol: corev1.ProtocolTCP, - }, - } -} - -// buildServicePorts creates service ports for the client service. -// Only includes the client port for external access. -func buildServicePorts(mg *multigresv1alpha1.MultiGateway) []corev1.ServicePort { - httpPort := HTTPPort - grpcPort := GRPCPort - postgresPort := PostgresPort - - if mg.Spec.HTTPPort != 0 { - httpPort = mg.Spec.HTTPPort - } - if mg.Spec.GRPCPort != 0 { - grpcPort = mg.Spec.GRPCPort - } - if mg.Spec.PostgresPort != 0 { - postgresPort = mg.Spec.PostgresPort - } - - return []corev1.ServicePort{ - { - Name: "http", - Port: httpPort, - Protocol: corev1.ProtocolTCP, - TargetPort: intstr.FromString("http"), - }, - { - Name: "grpc", - Port: grpcPort, - Protocol: corev1.ProtocolTCP, - TargetPort: intstr.FromString("grpc"), - }, - { - Name: "postgres", - Port: postgresPort, - Protocol: corev1.ProtocolTCP, - TargetPort: intstr.FromString("postgres"), - }, - } -} diff --git a/pkg/resource-handler/controller/multigateway/ports_test.go b/pkg/resource-handler/controller/multigateway/ports_test.go deleted file mode 100644 index 3ce18c1d..00000000 --- a/pkg/resource-handler/controller/multigateway/ports_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package multigateway - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" -) - -func TestBuildContainerPorts(t *testing.T) { - tests := map[string]struct { - mg *multigresv1alpha1.MultiGateway - want []corev1.ContainerPort - }{ - "default ports": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - want: []corev1.ContainerPort{ - { - Name: "http", - ContainerPort: 15100, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "grpc", - ContainerPort: 15170, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "postgres", - ContainerPort: 15432, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - "custom ports": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{ - HTTPPort: 1, - GRPCPort: 2, - PostgresPort: 3, - }, - }, - want: []corev1.ContainerPort{ - { - Name: "http", - ContainerPort: 1, - Protocol: corev1.ProtocolTCP, - }, - - { - Name: "grpc", - ContainerPort: 2, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "postgres", - ContainerPort: 3, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := buildContainerPorts(tc.mg) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("buildContainerPorts() mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestBuildServicePorts(t 
*testing.T) { - tests := map[string]struct { - mg *multigresv1alpha1.MultiGateway - want []corev1.ServicePort - }{ - "default ports": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - want: []corev1.ServicePort{ - { - Name: "http", - Port: 15100, - TargetPort: intstr.FromString("http"), - Protocol: corev1.ProtocolTCP, - }, - { - Name: "grpc", - Port: 15170, - TargetPort: intstr.FromString("grpc"), - Protocol: corev1.ProtocolTCP, - }, - { - Name: "postgres", - Port: 15432, - TargetPort: intstr.FromString("postgres"), - Protocol: corev1.ProtocolTCP, - }, - }, - }, - "custom ports": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{ - HTTPPort: 1, - GRPCPort: 2, - PostgresPort: 3, - }, - }, - want: []corev1.ServicePort{ - { - Name: "http", - Port: 1, - TargetPort: intstr.FromString("http"), - Protocol: corev1.ProtocolTCP, - }, - - { - Name: "grpc", - Port: 2, - TargetPort: intstr.FromString("grpc"), - Protocol: corev1.ProtocolTCP, - }, - { - Name: "postgres", - Port: 3, - TargetPort: intstr.FromString("postgres"), - Protocol: corev1.ProtocolTCP, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := buildServicePorts(tc.mg) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("buildServicePorts() mismatch (-want +got):\n%s", diff) - } - }) - } -} diff --git a/pkg/resource-handler/controller/multigateway/service.go b/pkg/resource-handler/controller/multigateway/service.go deleted file mode 100644 index aa7d1bc3..00000000 --- a/pkg/resource-handler/controller/multigateway/service.go +++ /dev/null @@ -1,41 +0,0 @@ -package multigateway - -import ( - "fmt" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" -) - -// BuildService creates a client Service for external access to Etcd. -// This service load balances across all etcd members. 
-func BuildService( - mg *multigresv1alpha1.MultiGateway, - scheme *runtime.Scheme, -) (*corev1.Service, error) { - labels := metadata.BuildStandardLabels(mg.Name, ComponentName, mg.Spec.CellName) - - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: mg.Name, - Namespace: mg.Namespace, - Labels: labels, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - Selector: labels, - Ports: buildServicePorts(mg), - }, - } - - if err := ctrl.SetControllerReference(mg, svc, scheme); err != nil { - return nil, fmt.Errorf("failed to set controller reference: %w", err) - } - - return svc, nil -} diff --git a/pkg/resource-handler/controller/multigateway/service_test.go b/pkg/resource-handler/controller/multigateway/service_test.go deleted file mode 100644 index 4ed98885..00000000 --- a/pkg/resource-handler/controller/multigateway/service_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package multigateway - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" -) - -func TestBuildService(t *testing.T) { - scheme := runtime.NewScheme() - _ = multigresv1alpha1.AddToScheme(scheme) - - tests := map[string]struct { - mg *multigresv1alpha1.MultiGateway - scheme *runtime.Scheme - want *corev1.Service - wantErr bool - }{ - "minimal spec": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - UID: "test-uid", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - scheme: scheme, - want: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "MultiGateway", - Name: "test-multigateway", - UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - Selector: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-multigateway", - "app.kubernetes.io/component": "multigateway", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - Ports: buildServicePorts(&multigresv1alpha1.MultiGateway{}), - }, - }, - }, - "scheme with incorrect type - should error": { - mg: &multigresv1alpha1.MultiGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-multigateway", - Namespace: "default", - }, - Spec: multigresv1alpha1.MultiGatewaySpec{}, - }, - scheme: runtime.NewScheme(), // empty scheme with incorrect type - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got, err := BuildService(tc.mg, tc.scheme) - - if (err != nil) != tc.wantErr { - t.Errorf("BuildClientService() error = %v, wantErr %v", err, tc.wantErr) - return - } - - if tc.wantErr { - return - } - - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("BuildClientService() mismatch (-want +got):\n%s", diff) - } - }) - } -} 
diff --git a/pkg/resource-handler/controller/shard/containers.go b/pkg/resource-handler/controller/shard/containers.go new file mode 100644 index 00000000..8a2627aa --- /dev/null +++ b/pkg/resource-handler/controller/shard/containers.go @@ -0,0 +1,181 @@ +package shard + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + // DefaultMultiPoolerImage is the default multipooler container image + DefaultMultiPoolerImage = "ghcr.io/multigres/multipooler:latest" + + // DefaultPgctldImage is the default pgctld container image + DefaultPgctldImage = "ghcr.io/multigres/pgctld:latest" + + // DefaultPostgresImage is the default postgres container image + DefaultPostgresImage = "postgres:17" + + // DefaultMultiOrchImage is the default multiorch container image + DefaultMultiOrchImage = "numtide/multigres-operator:latest" + + // PgctldVolumeName is the name of the shared volume for pgctld binary + PgctldVolumeName = "pgctld-bin" + + // PgctldMountPath is the mount path for pgctld binary in postgres container + PgctldMountPath = "/usr/local/bin/pgctld" + + // DataVolumeName is the name of the data volume for PostgreSQL + DataVolumeName = "pgdata" + + // DataMountPath is the mount path for PostgreSQL data + DataMountPath = "/var/lib/postgresql/data" +) + +// sidecarRestartPolicy is the restart policy for native sidecar containers +var sidecarRestartPolicy = corev1.ContainerRestartPolicyAlways + +// buildPostgresContainer creates the postgres container spec for a pool. +// This runs pgctld binary (which wraps postgres) and mounts persistent data storage. +func buildPostgresContainer( + shard *multigresv1alpha1.Shard, + pool multigresv1alpha1.ShardPoolSpec, +) corev1.Container { + image := DefaultPostgresImage + if shard.Spec.Images.Postgres != "" { + image = shard.Spec.Images.Postgres + } + + return corev1.Container{ + Name: "postgres", + Image: image, + Resources: pool.Postgres.Resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + { + Name: PgctldVolumeName, + MountPath: PgctldMountPath, + }, + }, + } +} + +// buildMultiPoolerSidecar creates the multipooler sidecar container spec. +// This is implemented as a native sidecar using init container with +// restartPolicy: Always (K8s 1.28+). +func buildMultiPoolerSidecar( + shard *multigresv1alpha1.Shard, + pool multigresv1alpha1.ShardPoolSpec, + poolName string, +) corev1.Container { + image := DefaultMultiPoolerImage + if shard.Spec.Images.MultiPooler != "" { + image = shard.Spec.Images.MultiPooler + } + + // TODO: Add remaining command line arguments: + // --topo-global-server-addresses (needs global topo server ref in ShardSpec) + // --topo-global-root (needs global topo server ref in ShardSpec) + // --pooler-dir, --grpc-socket-file, --log-level, --log-output, --hostname, --service-map + + args := []string{ + "--http-port", "15200", + "--grpc-port", "15270", + "--topo-implementation", "etcd2", + "--cell", pool.Cell, + "--database", pool.Database, + "--table-group", pool.TableGroup, + "--service-id", getPoolServiceID(shard.Name, poolName), + "--pgctld-addr", "localhost:15470", + "--pg-port", "5432", + } + + return corev1.Container{ + Name: "multipooler", + Image: image, + Args: args, + Ports: buildMultiPoolerContainerPorts(), + Resources: pool.MultiPooler.Resources, + RestartPolicy: &sidecarRestartPolicy, + } +} + +// buildPgctldInitContainer creates the pgctld init container spec. 
+// This copies the pgctld binary to a shared volume for use by the postgres container. +func buildPgctldInitContainer(shard *multigresv1alpha1.Shard) corev1.Container { + image := DefaultPgctldImage + // TODO: Add pgctld image field to Shard spec if needed + + return corev1.Container{ + Name: "pgctld-init", + Image: image, + Command: []string{"sh", "-c", "cp /pgctld /shared/pgctld && chmod +x /shared/pgctld"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: PgctldVolumeName, + MountPath: "/shared", + }, + }, + } +} + +// buildMultiOrchContainer creates the MultiOrch container spec. +func buildMultiOrchContainer(shard *multigresv1alpha1.Shard) corev1.Container { + image := DefaultMultiOrchImage + if shard.Spec.MultiOrch.Image != "" { + image = shard.Spec.MultiOrch.Image + } + + // TODO: Add remaining command line arguments: + // --topo-global-server-addresses (needs global topo server ref in ShardSpec) + // --topo-global-root (needs global topo server ref in ShardSpec) + // --cell (needs to be determined per-pod using StatefulSet with topology spread or env var from Downward API based on shard.Spec.MultiOrch.Cells) + // --log-level, --log-output, --hostname + + args := []string{ + "--http-port", "15300", + "--grpc-port", "15370", + "--topo-implementation", "etcd2", + } + + return corev1.Container{ + Name: "multiorch", + Image: image, + Args: args, + Ports: buildMultiOrchContainerPorts(), + Resources: shard.Spec.MultiOrch.Resources, + } +} + +// buildPgctldVolume creates the shared emptyDir volume for pgctld binary. +func buildPgctldVolume() corev1.Volume { + return corev1.Volume{ + Name: PgctldVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } +} + +// buildDataVolumeClaimTemplate creates the PVC template for PostgreSQL data. +func buildDataVolumeClaimTemplate( + pool multigresv1alpha1.ShardPoolSpec, +) corev1.PersistentVolumeClaim { + // Use the pool's DataVolumeClaimTemplate directly if provided + return corev1.PersistentVolumeClaim{ + Spec: pool.DataVolumeClaimTemplate, + } +} + +// getPoolServiceID generates a unique service ID for a pool. +// This is used in multipooler and pgctld arguments. 
+func getPoolServiceID(shardName string, poolName string) string { + // TODO: Use proper ID generation (UUID or consistent hash) + // For now, use simple format + return fmt.Sprintf("%s-pool-%s", shardName, poolName) +} diff --git a/pkg/resource-handler/controller/shard/containers_test.go b/pkg/resource-handler/controller/shard/containers_test.go new file mode 100644 index 00000000..cf487902 --- /dev/null +++ b/pkg/resource-handler/controller/shard/containers_test.go @@ -0,0 +1,389 @@ +package shard + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildPostgresContainer(t *testing.T) { + tests := map[string]struct { + shard *multigresv1alpha1.Shard + poolSpec multigresv1alpha1.ShardPoolSpec + want corev1.Container + }{ + "default postgres image with no resources": { + shard: &multigresv1alpha1.Shard{ + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{}, + want: corev1.Container{ + Name: "postgres", + Image: DefaultPostgresImage, + Resources: corev1.ResourceRequirements{}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + { + Name: PgctldVolumeName, + MountPath: PgctldMountPath, + }, + }, + }, + }, + "custom postgres image": { + shard: &multigresv1alpha1.Shard{ + Spec: multigresv1alpha1.ShardSpec{ + Images: multigresv1alpha1.ShardImagesSpec{ + Postgres: "postgres:16", + }, + }, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{}, + want: corev1.Container{ + Name: "postgres", + Image: "postgres:16", + Resources: corev1.ResourceRequirements{}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + { + Name: PgctldVolumeName, + MountPath: PgctldMountPath, + }, + }, + }, + }, + "with resource requirements": { + shard: &multigresv1alpha1.Shard{ + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Postgres: multigresv1alpha1.PostgresSpec{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + }, + }, + want: corev1.Container{ + Name: "postgres", + Image: DefaultPostgresImage, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + { + Name: PgctldVolumeName, + MountPath: PgctldMountPath, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildPostgresContainer(tc.shard, tc.poolSpec) + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildPostgresContainer() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildMultiPoolerSidecar(t *testing.T) { + tests := map[string]struct { + shard *multigresv1alpha1.Shard + poolSpec multigresv1alpha1.ShardPoolSpec + want corev1.Container + }{ + "default 
multipooler image with no resources": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "test-shard"}, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Cell: "zone1", + Database: "testdb", + TableGroup: "default", + }, + want: corev1.Container{ + Name: "multipooler", + Image: DefaultMultiPoolerImage, + Args: []string{ + "--http-port", "15200", + "--grpc-port", "15270", + "--topo-implementation", "etcd2", + "--cell", "zone1", + "--database", "testdb", + "--table-group", "default", + "--service-id", "test-shard-pool-primary", + "--pgctld-addr", "localhost:15470", + "--pg-port", "5432", + }, + Ports: buildMultiPoolerContainerPorts(), + Resources: corev1.ResourceRequirements{}, + RestartPolicy: &sidecarRestartPolicy, + }, + }, + "custom multipooler image": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "custom-shard"}, + Spec: multigresv1alpha1.ShardSpec{ + Images: multigresv1alpha1.ShardImagesSpec{ + MultiPooler: "custom/multipooler:v1.0.0", + }, + }, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Cell: "zone2", + Database: "proddb", + TableGroup: "orders", + }, + want: corev1.Container{ + Name: "multipooler", + Image: "custom/multipooler:v1.0.0", + Args: []string{ + "--http-port", "15200", + "--grpc-port", "15270", + "--topo-implementation", "etcd2", + "--cell", "zone2", + "--database", "proddb", + "--table-group", "orders", + "--service-id", "custom-shard-pool-primary", + "--pgctld-addr", "localhost:15470", + "--pg-port", "5432", + }, + Ports: buildMultiPoolerContainerPorts(), + Resources: corev1.ResourceRequirements{}, + RestartPolicy: &sidecarRestartPolicy, + }, + }, + "with resource requirements": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "resource-shard"}, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Cell: "zone1", + Database: "mydb", + TableGroup: "default", + MultiPooler: multigresv1alpha1.MultiPoolerSpec{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + }, + }, + }, + want: corev1.Container{ + Name: "multipooler", + Image: DefaultMultiPoolerImage, + Args: []string{ + "--http-port", "15200", + "--grpc-port", "15270", + "--topo-implementation", "etcd2", + "--cell", "zone1", + "--database", "mydb", + "--table-group", "default", + "--service-id", "resource-shard-pool-primary", + "--pgctld-addr", "localhost:15470", + "--pg-port", "5432", + }, + Ports: buildMultiPoolerContainerPorts(), + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + }, + RestartPolicy: &sidecarRestartPolicy, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildMultiPoolerSidecar(tc.shard, tc.poolSpec, "primary") + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildMultiPoolerSidecar() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildPgctldInitContainer(t *testing.T) { + tests := map[string]struct { + shard 
*multigresv1alpha1.Shard + want corev1.Container + }{ + "default pgctld init container": { + shard: &multigresv1alpha1.Shard{ + Spec: multigresv1alpha1.ShardSpec{}, + }, + want: corev1.Container{ + Name: "pgctld-init", + Image: DefaultPgctldImage, + Command: []string{ + "sh", + "-c", + "cp /pgctld /shared/pgctld && chmod +x /shared/pgctld", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: PgctldVolumeName, + MountPath: "/shared", + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildPgctldInitContainer(tc.shard) + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildPgctldInitContainer() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildMultiOrchContainer(t *testing.T) { + tests := map[string]struct { + shard *multigresv1alpha1.Shard + want corev1.Container + }{ + "default multiorch container": { + shard: &multigresv1alpha1.Shard{ + Spec: multigresv1alpha1.ShardSpec{}, + }, + want: corev1.Container{ + Name: "multiorch", + Image: DefaultMultiOrchImage, + Args: []string{ + "--http-port", "15300", + "--grpc-port", "15370", + "--topo-implementation", "etcd2", + }, + Ports: buildMultiOrchContainerPorts(), + Resources: corev1.ResourceRequirements{}, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildMultiOrchContainer(tc.shard) + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildMultiOrchContainer() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildPgctldVolume(t *testing.T) { + want := corev1.Volume{ + Name: PgctldVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } + + got := buildPgctldVolume() + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("buildPgctldVolume() mismatch (-want +got):\n%s", diff) + } +} + +func TestBuildDataVolumeClaimTemplate(t *testing.T) { + tests := map[string]struct { + poolSpec multigresv1alpha1.ShardPoolSpec + want corev1.PersistentVolumeClaim + }{ + "with storage class and size": { + poolSpec: multigresv1alpha1.ShardPoolSpec{ + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To("fast-ssd"), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + want: corev1.PersistentVolumeClaim{ + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To("fast-ssd"), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + "minimal spec": { + poolSpec: multigresv1alpha1.ShardPoolSpec{ + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{}, + }, + want: corev1.PersistentVolumeClaim{ + Spec: corev1.PersistentVolumeClaimSpec{}, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildDataVolumeClaimTemplate(tc.poolSpec) + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildDataVolumeClaimTemplate() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/shard/integration_test.go b/pkg/resource-handler/controller/shard/integration_test.go new file mode 100644 index 00000000..1347d969 --- /dev/null +++ b/pkg/resource-handler/controller/shard/integration_test.go @@ -0,0 
+1,358 @@ +//go:build integration +// +build integration + +package shard_test + +import ( + "path/filepath" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + shardcontroller "github.com/numtide/multigres-operator/pkg/resource-handler/controller/shard" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func TestSetupWithManager(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + if err := (&shardcontroller.ShardReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr, controller.Options{ + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } +} + +func TestShardReconciliation(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + shard *multigresv1alpha1.Shard + wantResources []client.Object + }{ + "simple shard with single replica pool": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"us-west-1a", "us-west-1b"}, // 2 cells = 2 replicas + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "us-west-1a", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(2)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + wantResources: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + // TODO: Cell information is not mapped for MultiOrch + // Labels: shardLabels(t, "test-shard-multiorch", "multiorch", "us-west-1a"), + Labels: shardLabels(t, "test-shard-multiorch", "multiorch", "multigres-global-topo"), + OwnerReferences: shardOwnerRefs(t, "test-shard"), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + Selector: &metav1.LabelSelector{ + // TODO: Cell information is not mapped for MultiOrch + // MatchLabels: shardLabels(t, "test-shard-multiorch", "multiorch", "us-west-1a"), + MatchLabels: shardLabels(t, "test-shard-multiorch", "multiorch", "multigres-global-topo"), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // TODO: Cell information is not mapped for MultiOrch + // Labels: shardLabels(t, "test-shard-multiorch", "multiorch", "us-west-1a"), + Labels: shardLabels(t, "test-shard-multiorch", "multiorch", "multigres-global-topo"), + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + 
Name: "multiorch", + Image: "numtide/multigres-operator:latest", + Args: []string{ + "--http-port", "15300", + "--grpc-port", "15370", + "--topo-implementation", "etcd2", + }, + Ports: []corev1.ContainerPort{ + tcpPort(t, "http", 15300), + tcpPort(t, "grpc", 15370), + }, + }, + }, + }, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + // TODO: Cell information is not mapped for MultiOrch + // Labels: shardLabels(t, "test-shard-multiorch", "multiorch", "us-west-1a"), + Labels: shardLabels(t, "test-shard-multiorch", "multiorch", "multigres-global-topo"), + OwnerReferences: shardOwnerRefs(t, "test-shard"), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + tcpServicePort(t, "http", 15300), + tcpServicePort(t, "grpc", 15370), + }, + // TODO: Cell information is not mapped for MultiOrch + // Selector: shardLabels(t, "test-shard-multiorch", "multiorch", "us-west-1a"), + Selector: shardLabels(t, "test-shard-multiorch", "multiorch", "multigres-global-topo"), + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary", + Namespace: "default", + Labels: shardLabels(t, "test-shard-pool-primary", "shard-pool", "us-west-1a"), + OwnerReferences: shardOwnerRefs(t, "test-shard"), + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-shard-pool-primary-headless", + Replicas: ptr.To(int32(2)), + Selector: &metav1.LabelSelector{ + MatchLabels: shardLabels(t, "test-shard-pool-primary", "shard-pool", "us-west-1a"), + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: shardLabels(t, "test-shard-pool-primary", "shard-pool", "us-west-1a"), + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "pgctld-init", + Image: "ghcr.io/multigres/pgctld:latest", + Command: []string{"sh", "-c", "cp /pgctld /shared/pgctld && chmod +x /shared/pgctld"}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "pgctld-bin", MountPath: "/shared"}, + }, + }, + { + Name: "multipooler", + Image: "ghcr.io/multigres/multipooler:latest", + Args: []string{ + "--http-port", "15200", + "--grpc-port", "15270", + "--topo-implementation", "etcd2", + "--cell", "us-west-1a", + "--database", "testdb", + "--table-group", "default", + "--service-id", "test-shard-pool-primary", + "--pgctld-addr", "localhost:15470", + "--pg-port", "5432", + }, + Ports: multipoolerPorts(t), + RestartPolicy: ptr.To(corev1.ContainerRestartPolicyAlways), + }, + }, + Containers: []corev1.Container{ + { + Name: "postgres", + Image: "postgres:17", + VolumeMounts: []corev1.VolumeMount{ + {Name: "pgdata", MountPath: "/var/lib/postgresql/data"}, + {Name: "pgctld-bin", MountPath: "/usr/local/bin/pgctld"}, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "pgctld-bin", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pgdata", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem), + 
}, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimPending, + }, + }, + }, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary-headless", + Namespace: "default", + Labels: shardLabels(t, "test-shard-pool-primary", "shard-pool", "us-west-1a"), + OwnerReferences: shardOwnerRefs(t, "test-shard"), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: corev1.ClusterIPNone, + Ports: []corev1.ServicePort{ + tcpServicePort(t, "http", 15200), + tcpServicePort(t, "grpc", 15270), + tcpServicePort(t, "postgres", 5432), + }, + Selector: shardLabels(t, "test-shard-pool-primary", "shard-pool", "us-west-1a"), + PublishNotReadyAddresses: true, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + ctx := t.Context() + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) + + watcher := testutil.NewResourceWatcher(t, ctx, mgr, + testutil.WithCmpOpts( + testutil.IgnoreMetaRuntimeFields(), + testutil.IgnoreServiceRuntimeFields(), + testutil.IgnoreDeploymentRuntimeFields(), + testutil.IgnoreStatefulSetRuntimeFields(), + testutil.IgnorePodSpecDefaults(), + testutil.IgnoreDeploymentSpecDefaults(), + testutil.IgnoreStatefulSetSpecDefaults(), + ), + testutil.WithExtraResource(&multigresv1alpha1.Shard{}), + ) + client := mgr.GetClient() + + shardReconciler := &shardcontroller.ShardReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + } + if err := shardReconciler.SetupWithManager(mgr, controller.Options{ + // Needed for the parallel test runs + SkipNameValidation: ptr.To(true), + }); err != nil { + t.Fatalf("Failed to create controller, %v", err) + } + + if err := client.Create(ctx, tc.shard); err != nil { + t.Fatalf("Failed to create the initial item, %v", err) + } + + if err := watcher.WaitForMatch(tc.wantResources...); err != nil { + t.Errorf("Resources mismatch:\n%v", err) + } + }) + } +} + +// Test helpers + +// shardLabels returns standard labels for shard resources in tests +func shardLabels(t testing.TB, instanceName, component, cellName string) map[string]string { + t.Helper() + return map[string]string{ + "app.kubernetes.io/component": component, + "app.kubernetes.io/instance": instanceName, + "app.kubernetes.io/managed-by": "multigres-operator", + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/part-of": "multigres", + } +} + +// shardOwnerRefs returns owner references for a Shard resource +func shardOwnerRefs(t testing.TB, shardName string) []metav1.OwnerReference { + t.Helper() + return []metav1.OwnerReference{{ + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: shardName, + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }} +} + +// tcpPort creates a simple TCP container port +func tcpPort(t testing.TB, name string, port int32) corev1.ContainerPort { + t.Helper() + return corev1.ContainerPort{Name: name, ContainerPort: port, Protocol: corev1.ProtocolTCP} +} + +// tcpServicePort creates a TCP service port with named target +func tcpServicePort(t testing.TB, name string, port int32) corev1.ServicePort { + t.Helper() + return corev1.ServicePort{Name: name, Port: port, TargetPort: intstr.FromString(name), Protocol: corev1.ProtocolTCP} +} + +// multipoolerPorts returns the standard multipooler container ports +func multipoolerPorts(t testing.TB) []corev1.ContainerPort { + t.Helper() + return []corev1.ContainerPort{ + 
tcpPort(t, "http", 15200), + tcpPort(t, "grpc", 15270), + tcpPort(t, "postgres", 5432), + } +} diff --git a/pkg/resource-handler/controller/shard/labels.go b/pkg/resource-handler/controller/shard/labels.go new file mode 100644 index 00000000..47576d79 --- /dev/null +++ b/pkg/resource-handler/controller/shard/labels.go @@ -0,0 +1,34 @@ +package shard + +import ( + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" +) + +// buildPoolLabels creates standard labels for pool resources, and uses the +// pool's database, table group, and cell details. Any additional labels are +// also merged, while keeping the main labels. +func buildPoolLabels( + shard *multigresv1alpha1.Shard, + poolName string, + poolSpec multigresv1alpha1.ShardPoolSpec, +) map[string]string { + fullPoolName := buildPoolName(shard.Name, poolName) + cellName := poolSpec.Cell + if cellName == "" { + cellName = metadata.DefaultCellName + } + + // TODO: Remove this once we figure what to do with the cell name. + _ = cellName + + labels := metadata.BuildStandardLabels(fullPoolName, PoolComponentName) + // TODO: Add multigres.com/* labels after finalizing label design: + // metadata.AddCellLabel(labels, cellName) + // metadata.AddDatabaseLabel(labels, poolSpec.Database) + // metadata.AddTableGroupLabel(labels, poolSpec.TableGroup) + + metadata.MergeLabels(labels, shard.GetObjectMeta().GetLabels()) + + return labels +} diff --git a/pkg/resource-handler/controller/shard/multiorch.go b/pkg/resource-handler/controller/shard/multiorch.go new file mode 100644 index 00000000..37423cec --- /dev/null +++ b/pkg/resource-handler/controller/shard/multiorch.go @@ -0,0 +1,98 @@ +package shard + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" +) + +const ( + // MultiOrchComponentName is the component label value for MultiOrch resources + MultiOrchComponentName = "multiorch" +) + +// BuildMultiOrchDeployment creates a Deployment for the MultiOrch component. +// MultiOrch handles orchestration for the shard. +func BuildMultiOrchDeployment( + shard *multigresv1alpha1.Shard, + scheme *runtime.Scheme, +) (*appsv1.Deployment, error) { + // MultiOrch should be deployed one per Cell. + // The user provided config of "Cells" can specify exact cells to deploy to, + // and when not specified, MultigresCluster configuration should pass down + // computed list of Cells associated for the entire MultigresCluster. + cellCount := len(shard.Spec.MultiOrch.Cells) + replicas := int32(cellCount) + + name := shard.Name + "-multiorch" + // MultiOrch doesn't have a specific cell, use default + // TODO: This label setup needs to be further reviewed, it probably needs more. 
+ labels := metadata.BuildStandardLabels(name, MultiOrchComponentName) + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: shard.Namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + buildMultiOrchContainer(shard), + }, + // TODO: Add Affinity support to MultiOrchSpec (like Cell's StatelessSpec) + // This would allow pod affinity/anti-affinity rules for MultiOrch deployment + }, + }, + }, + } + + if err := ctrl.SetControllerReference(shard, deployment, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return deployment, nil +} + +// BuildMultiOrchService creates a Service for the MultiOrch component. +func BuildMultiOrchService( + shard *multigresv1alpha1.Shard, + scheme *runtime.Scheme, +) (*corev1.Service, error) { + name := shard.Name + "-multiorch" + labels := metadata.BuildStandardLabels(name, MultiOrchComponentName) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: shard.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: labels, + Ports: buildMultiOrchServicePorts(), + }, + } + + if err := ctrl.SetControllerReference(shard, svc, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return svc, nil +} diff --git a/pkg/resource-handler/controller/shard/multiorch_test.go b/pkg/resource-handler/controller/shard/multiorch_test.go new file mode 100644 index 00000000..b473e502 --- /dev/null +++ b/pkg/resource-handler/controller/shard/multiorch_test.go @@ -0,0 +1,353 @@ +package shard + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildMultiOrchDeployment(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + shard *multigresv1alpha1.Shard + scheme *runtime.Scheme + want *appsv1.Deployment + wantErr bool + }{ + "minimal spec - all defaults": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"us-west-1a"}, // 1 cell = 1 replica + }, + }, + }, + scheme: scheme, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "test-shard", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), // Replicas 
= len(cells) + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + buildMultiOrchContainer(&multigresv1alpha1.Shard{ + Spec: multigresv1alpha1.ShardSpec{}, + }), + }, + }, + }, + }, + }, + }, + "with different shard name": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-shard", + Namespace: "prod-ns", + UID: "prod-uid", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1", "zone2"}, // 2 cells = 2 replicas + }, + }, + }, + scheme: scheme, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-shard-multiorch", + Namespace: "prod-ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "production-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "production-shard", + UID: "prod-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), // Replicas = len(cells) = 2 + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "production-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "production-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + buildMultiOrchContainer(&multigresv1alpha1.Shard{ + Spec: multigresv1alpha1.ShardSpec{}, + }), + }, + }, + }, + }, + }, + }, + "invalid scheme - should error": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + scheme: runtime.NewScheme(), // empty scheme + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildMultiOrchDeployment(tc.shard, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildMultiOrchDeployment() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildMultiOrchDeployment() mismatch (-want 
+got):\n%s", diff) + } + }) + } +} + +func TestBuildMultiOrchService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + shard *multigresv1alpha1.Shard + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "test-shard", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiOrchHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiOrchGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + "with different namespace": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-shard", + Namespace: "prod-ns", + UID: "prod-uid", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-shard-multiorch", + Namespace: "prod-ns", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "production-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "production-shard", + UID: "prod-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "production-shard-multiorch", + "app.kubernetes.io/component": MultiOrchComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiOrchHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiOrchGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + "invalid scheme - should error": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: 
multigresv1alpha1.ShardSpec{}, + }, + scheme: runtime.NewScheme(), // empty scheme + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildMultiOrchService(tc.shard, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildMultiOrchService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildMultiOrchService() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/shard/pool_service.go b/pkg/resource-handler/controller/shard/pool_service.go new file mode 100644 index 00000000..0344789f --- /dev/null +++ b/pkg/resource-handler/controller/shard/pool_service.go @@ -0,0 +1,54 @@ +package shard + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + // PoolComponentName is the component label value for pool resources + PoolComponentName = "shard-pool" +) + +// BuildPoolHeadlessService creates a headless Service for a pool's StatefulSet. +// Headless services are required for StatefulSet pod DNS records. +func BuildPoolHeadlessService( + shard *multigresv1alpha1.Shard, + poolName string, + poolSpec multigresv1alpha1.ShardPoolSpec, + scheme *runtime.Scheme, +) (*corev1.Service, error) { + name := buildPoolName(shard.Name, poolName) + labels := buildPoolLabels(shard, poolName, poolSpec) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name + "-headless", + Namespace: shard.Namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: labels, + Ports: buildPoolHeadlessServicePorts(), + PublishNotReadyAddresses: true, + }, + } + + if err := ctrl.SetControllerReference(shard, svc, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return svc, nil +} + +// buildPoolName generates a consistent name for pool resources. 
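+// For example, shard "test-shard" with pool "primary" yields "test-shard-pool-primary".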
+func buildPoolName(shardName, poolName string) string { + return fmt.Sprintf("%s-pool-%s", shardName, poolName) +} diff --git a/pkg/resource-handler/controller/shard/pool_service_test.go b/pkg/resource-handler/controller/shard/pool_service_test.go new file mode 100644 index 00000000..a328ceb2 --- /dev/null +++ b/pkg/resource-handler/controller/shard/pool_service_test.go @@ -0,0 +1,328 @@ +package shard + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildPoolHeadlessService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + shard *multigresv1alpha1.Shard + poolName string + poolSpec multigresv1alpha1.ShardPoolSpec + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "replica pool headless service": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolName: "primary", + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Type: "replica", + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary-headless", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "test-shard", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiPoolerHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiPoolerGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: DefaultPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + "readOnly pool with custom cell": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-001", + Namespace: "prod", + UID: "prod-uid", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolName: "ro", + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Type: "readonly", + Cell: "zone-east", + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-001-pool-ro-headless", + Namespace: "prod", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-001-pool-ro", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + 
OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "shard-001", + UID: "prod-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-001-pool-ro", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiPoolerHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiPoolerGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: DefaultPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + "pool without type uses index in name": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-002", + Namespace: "default", + UID: "uid-002", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolName: "primary", + poolSpec: multigresv1alpha1.ShardPoolSpec{}, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-002-pool-primary-headless", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-002-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "shard-002", + UID: "uid-002", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-002-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiPoolerHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiPoolerGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: DefaultPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + "invalid scheme - should error": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Type: "replica", + }, + poolName: "primary", + scheme: runtime.NewScheme(), // empty scheme + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + testScheme := scheme + if tc.scheme != nil { + testScheme = tc.scheme + } + got, err := BuildPoolHeadlessService(tc.shard, tc.poolName, tc.poolSpec, testScheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildPoolHeadlessService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if 
tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildPoolHeadlessService() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildPoolLabels(t *testing.T) { + tests := map[string]struct { + shard *multigresv1alpha1.Shard + poolName string + want map[string]string + }{ + "primary pool": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "test-shard"}, + Spec: multigresv1alpha1.ShardSpec{ + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": {Cell: "zone-west"}, + }, + }, + }, + poolName: "primary", + want: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + "replica pool": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "my-shard"}, + Spec: multigresv1alpha1.ShardSpec{ + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "replica": {}, + }, + }, + }, + poolName: "replica", + want: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "my-shard-pool-replica", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + // Getting the pool details by getting the definitions based on + // poolName provided. + poolSpec := tc.shard.Spec.Pools[tc.poolName] + got := buildPoolLabels(tc.shard, tc.poolName, poolSpec) + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildPoolLabels() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/shard/pool_statefulset.go b/pkg/resource-handler/controller/shard/pool_statefulset.go new file mode 100644 index 00000000..52648792 --- /dev/null +++ b/pkg/resource-handler/controller/shard/pool_statefulset.go @@ -0,0 +1,109 @@ +package shard + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + // DefaultPoolReplicas is the default number of replicas for a pool + DefaultPoolReplicas int32 = 1 +) + +// BuildPoolStatefulSet creates a StatefulSet for a shard pool. 
+// The StatefulSet includes: +// - Init container: pgctld-init (copies pgctld binary to shared emptyDir) +// - Init container (native sidecar): multipooler (with restartPolicy: Always) +// - Main container: postgres (runs with pgctld binary) +// - EmptyDir volume for pgctld binary sharing +// - PVC for postgres data +func BuildPoolStatefulSet( + shard *multigresv1alpha1.Shard, + poolName string, + poolSpec multigresv1alpha1.ShardPoolSpec, + scheme *runtime.Scheme, +) (*appsv1.StatefulSet, error) { + name := buildPoolName(shard.Name, poolName) + headlessServiceName := name + "-headless" + labels := buildPoolLabels(shard, poolName, poolSpec) + + replicas := DefaultPoolReplicas + if poolSpec.Replicas != nil { + replicas = *poolSpec.Replicas + } + + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: shard.Namespace, + Labels: labels, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: headlessServiceName, + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + // Init containers: pgctld copies binary, multipooler is a native sidecar + InitContainers: []corev1.Container{ + buildPgctldInitContainer(shard), + buildMultiPoolerSidecar(shard, poolSpec, poolName), + }, + // Postgres is the main container (runs pgctld binary) + Containers: []corev1.Container{ + buildPostgresContainer(shard, poolSpec), + }, + // Shared volume for pgctld binary + Volumes: []corev1.Volume{ + buildPgctldVolume(), + }, + Affinity: poolSpec.Affinity, + }, + }, + VolumeClaimTemplates: buildPoolVolumeClaimTemplates(poolSpec), + }, + } + + if err := ctrl.SetControllerReference(shard, sts, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + + return sts, nil +} + +// buildPoolVolumeClaimTemplates creates the PVC templates for a pool. +// Uses the pool's DataVolumeClaimTemplate if provided. 
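+// VolumeMode defaults to Filesystem when the template leaves it unset.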
+func buildPoolVolumeClaimTemplates( + pool multigresv1alpha1.ShardPoolSpec, +) []corev1.PersistentVolumeClaim { + pvc := corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: pool.DataVolumeClaimTemplate, + } + + // Set default VolumeMode if not specified + if pvc.Spec.VolumeMode == nil { + pvc.Spec.VolumeMode = ptr.To(corev1.PersistentVolumeFilesystem) + } + + return []corev1.PersistentVolumeClaim{pvc} +} diff --git a/pkg/resource-handler/controller/shard/pool_statefulset_test.go b/pkg/resource-handler/controller/shard/pool_statefulset_test.go new file mode 100644 index 00000000..5de2e3cf --- /dev/null +++ b/pkg/resource-handler/controller/shard/pool_statefulset_test.go @@ -0,0 +1,643 @@ +package shard + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildPoolStatefulSet(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + shard *multigresv1alpha1.Shard + poolName string + poolSpec multigresv1alpha1.ShardPoolSpec + scheme *runtime.Scheme + want *appsv1.StatefulSet + wantErr bool + }{ + "replica pool with default replicas": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Type: "replica", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + poolName: "primary", + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "test-shard", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "test-shard-pool-primary-headless", + Replicas: ptr.To(DefaultPoolReplicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-shard-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + 
"app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + buildPgctldInitContainer(&multigresv1alpha1.Shard{}), + buildMultiPoolerSidecar( + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "test-shard"}, + }, + multigresv1alpha1.ShardPoolSpec{}, + "primary", + ), + }, + Containers: []corev1.Container{ + buildPostgresContainer( + &multigresv1alpha1.Shard{}, + multigresv1alpha1.ShardPoolSpec{}, + ), + }, + Volumes: []corev1.Volume{ + buildPgctldVolume(), + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem), + }, + }, + }, + }, + }, + }, + "readOnly pool with custom replicas and cell": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-001", + Namespace: "prod", + UID: "prod-uid", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Type: "readOnly", + Cell: "zone-west", + Replicas: ptr.To(int32(3)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To("fast-ssd"), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("20Gi"), + }, + }, + }, + }, + poolName: "replica", + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-001-pool-replica", + Namespace: "prod", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-001-pool-replica", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "shard-001", + UID: "prod-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "shard-001-pool-replica-headless", + Replicas: ptr.To(int32(3)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-001-pool-replica", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-001-pool-replica", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + buildPgctldInitContainer(&multigresv1alpha1.Shard{}), + buildMultiPoolerSidecar( + &multigresv1alpha1.Shard{ + ObjectMeta: 
metav1.ObjectMeta{Name: "shard-001"}, + }, + multigresv1alpha1.ShardPoolSpec{Cell: "zone-west"}, + "replica", + ), + }, + Containers: []corev1.Container{ + buildPostgresContainer( + &multigresv1alpha1.Shard{}, + multigresv1alpha1.ShardPoolSpec{}, + ), + }, + Volumes: []corev1.Volume{ + buildPgctldVolume(), + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To("fast-ssd"), + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("20Gi"), + }, + }, + VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem), + }, + }, + }, + }, + }, + }, + "pool without type uses pool index": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-002", + Namespace: "default", + UID: "uid-002", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("5Gi"), + }, + }, + }, + }, + poolName: "readOnly", + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-002-pool-readOnly", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-002-pool-readOnly", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "shard-002", + UID: "uid-002", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "shard-002-pool-readOnly-headless", + Replicas: ptr.To(DefaultPoolReplicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-002-pool-readOnly", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-002-pool-readOnly", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + buildPgctldInitContainer(&multigresv1alpha1.Shard{}), + buildMultiPoolerSidecar( + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "shard-002"}, + }, + multigresv1alpha1.ShardPoolSpec{}, + "readOnly", + ), + }, + Containers: []corev1.Container{ + buildPostgresContainer( + &multigresv1alpha1.Shard{}, + multigresv1alpha1.ShardPoolSpec{}, + ), + }, + Volumes: []corev1.Volume{ + buildPgctldVolume(), + }, + }, + }, + VolumeClaimTemplates: 
[]corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("5Gi"), + }, + }, + VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem), + }, + }, + }, + }, + }, + }, + "pool with affinity": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-affinity", + Namespace: "default", + UID: "affinity-uid", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Type: "replica", + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "disk-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"ssd"}, + }, + }, + }, + }, + }, + }, + }, + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + poolName: "primary", + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shard-affinity-pool-primary", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-affinity-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "Shard", + Name: "shard-affinity", + UID: "affinity-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "shard-affinity-pool-primary-headless", + Replicas: ptr.To(DefaultPoolReplicas), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-affinity-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "shard-affinity-pool-primary", + "app.kubernetes.io/component": PoolComponentName, + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + buildPgctldInitContainer(&multigresv1alpha1.Shard{}), + buildMultiPoolerSidecar( + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Name: "shard-affinity"}, + }, + multigresv1alpha1.ShardPoolSpec{}, + "primary", + ), + }, + Containers: []corev1.Container{ + buildPostgresContainer( + &multigresv1alpha1.Shard{}, + multigresv1alpha1.ShardPoolSpec{}, + ), + }, + Volumes: []corev1.Volume{ + buildPgctldVolume(), + }, + Affinity: 
&corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "disk-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"ssd"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem), + }, + }, + }, + }, + }, + }, + "invalid scheme - should error": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{}, + }, + poolSpec: multigresv1alpha1.ShardPoolSpec{ + Type: "replica", + }, + poolName: "primary", + scheme: runtime.NewScheme(), // empty scheme + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + testScheme := scheme + if tc.scheme != nil { + testScheme = tc.scheme + } + got, err := BuildPoolStatefulSet(tc.shard, tc.poolName, tc.poolSpec, testScheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildPoolStatefulSet() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildPoolStatefulSet() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildPoolVolumeClaimTemplates(t *testing.T) { + tests := map[string]struct { + poolSpec multigresv1alpha1.ShardPoolSpec + want []corev1.PersistentVolumeClaim + }{ + "with storage class and size": { + poolSpec: multigresv1alpha1.ShardPoolSpec{ + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To("fast-ssd"), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + want: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To("fast-ssd"), + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem), + }, + }, + }, + }, + "with volume mode already set": { + poolSpec: multigresv1alpha1.ShardPoolSpec{ + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + VolumeMode: ptr.To(corev1.PersistentVolumeBlock), + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("20Gi"), + }, + }, + }, + }, + want: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeMode: ptr.To(corev1.PersistentVolumeBlock), + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("20Gi"), + }, + }, + }, + }, + }, + }, + "minimal spec sets default 
VolumeMode": { + poolSpec: multigresv1alpha1.ShardPoolSpec{ + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{}, + }, + want: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: DataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem), + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got := buildPoolVolumeClaimTemplates(tc.poolSpec) + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildPoolVolumeClaimTemplates() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/shard/ports.go b/pkg/resource-handler/controller/shard/ports.go new file mode 100644 index 00000000..89a6c88d --- /dev/null +++ b/pkg/resource-handler/controller/shard/ports.go @@ -0,0 +1,106 @@ +package shard + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + // DefaultMultiPoolerHTTPPort is the default port for MultiPooler HTTP traffic. + DefaultMultiPoolerHTTPPort int32 = 15200 + + // DefaultMultiPoolerGRPCPort is the default port for MultiPooler gRPC traffic. + DefaultMultiPoolerGRPCPort int32 = 15270 + + // DefaultPostgresPort is the default port for PostgreSQL protocol traffic. + DefaultPostgresPort int32 = 5432 + + // DefaultMultiOrchHTTPPort is the default port for MultiOrch HTTP traffic. + DefaultMultiOrchHTTPPort int32 = 15300 + + // DefaultMultiOrchGRPCPort is the default port for MultiOrch gRPC traffic. + DefaultMultiOrchGRPCPort int32 = 15370 +) + +// buildMultiPoolerContainerPorts creates the port definitions for the multipooler sidecar container. +// Returns ports for HTTP, gRPC, and PostgreSQL traffic. +func buildMultiPoolerContainerPorts() []corev1.ContainerPort { + return []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: DefaultMultiPoolerHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: DefaultMultiPoolerGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: DefaultPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + } +} + +// buildPoolHeadlessServicePorts creates service ports for the pool headless service. +// Includes HTTP, gRPC, and PostgreSQL ports for StatefulSet pod discovery. +func buildPoolHeadlessServicePorts() []corev1.ServicePort { + return []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiPoolerHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiPoolerGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: DefaultPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + } +} + +// buildMultiOrchContainerPorts creates the port definitions for the MultiOrch container. +// Returns ports for HTTP and gRPC traffic. +func buildMultiOrchContainerPorts() []corev1.ContainerPort { + return []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: DefaultMultiOrchHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: DefaultMultiOrchGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + } +} + +// buildMultiOrchServicePorts creates service ports for the MultiOrch service. +// Includes HTTP and gRPC ports. 
+func buildMultiOrchServicePorts() []corev1.ServicePort { + return []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiOrchHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiOrchGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + } +} diff --git a/pkg/resource-handler/controller/shard/ports_test.go b/pkg/resource-handler/controller/shard/ports_test.go new file mode 100644 index 00000000..6aee788e --- /dev/null +++ b/pkg/resource-handler/controller/shard/ports_test.go @@ -0,0 +1,269 @@ +package shard + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestBuildMultiPoolerContainerPorts(t *testing.T) { + tests := []struct { + name string + want []corev1.ContainerPort + }{ + { + name: "returns correct ports", + want: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: DefaultMultiPoolerHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: DefaultMultiPoolerGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + ContainerPort: DefaultPostgresPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildMultiPoolerContainerPorts() + + if len(got) != len(tt.want) { + t.Errorf( + "buildMultiPoolerContainerPorts() length = %d, want %d", + len(got), + len(tt.want), + ) + return + } + + for i, port := range got { + if port.Name != tt.want[i].Name { + t.Errorf("port[%d].Name = %s, want %s", i, port.Name, tt.want[i].Name) + } + if port.ContainerPort != tt.want[i].ContainerPort { + t.Errorf( + "port[%d].ContainerPort = %d, want %d", + i, + port.ContainerPort, + tt.want[i].ContainerPort, + ) + } + if port.Protocol != tt.want[i].Protocol { + t.Errorf( + "port[%d].Protocol = %s, want %s", + i, + port.Protocol, + tt.want[i].Protocol, + ) + } + } + }) + } +} + +func TestBuildPoolHeadlessServicePorts(t *testing.T) { + tests := []struct { + name string + want []corev1.ServicePort + }{ + { + name: "returns correct service ports", + want: []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiPoolerHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiPoolerGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "postgres", + Port: DefaultPostgresPort, + TargetPort: intstr.FromString("postgres"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildPoolHeadlessServicePorts() + + if len(got) != len(tt.want) { + t.Errorf( + "buildPoolHeadlessServicePorts() length = %d, want %d", + len(got), + len(tt.want), + ) + return + } + + for i, port := range got { + if port.Name != tt.want[i].Name { + t.Errorf("port[%d].Name = %s, want %s", i, port.Name, tt.want[i].Name) + } + if port.Port != tt.want[i].Port { + t.Errorf("port[%d].Port = %d, want %d", i, port.Port, tt.want[i].Port) + } + if port.TargetPort != tt.want[i].TargetPort { + t.Errorf( + "port[%d].TargetPort = %v, want %v", + i, + port.TargetPort, + tt.want[i].TargetPort, + ) + } + if port.Protocol != tt.want[i].Protocol { + t.Errorf( + "port[%d].Protocol = %s, want %s", + i, + port.Protocol, + tt.want[i].Protocol, + ) + } + } + }) + } +} + +func TestBuildMultiOrchContainerPorts(t *testing.T) { + tests := []struct { + name string + want 
[]corev1.ContainerPort + }{ + { + name: "returns correct ports", + want: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: DefaultMultiOrchHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + ContainerPort: DefaultMultiOrchGRPCPort, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildMultiOrchContainerPorts() + + if len(got) != len(tt.want) { + t.Errorf( + "buildMultiOrchContainerPorts() length = %d, want %d", + len(got), + len(tt.want), + ) + return + } + + for i, port := range got { + if port.Name != tt.want[i].Name { + t.Errorf("port[%d].Name = %s, want %s", i, port.Name, tt.want[i].Name) + } + if port.ContainerPort != tt.want[i].ContainerPort { + t.Errorf( + "port[%d].ContainerPort = %d, want %d", + i, + port.ContainerPort, + tt.want[i].ContainerPort, + ) + } + if port.Protocol != tt.want[i].Protocol { + t.Errorf( + "port[%d].Protocol = %s, want %s", + i, + port.Protocol, + tt.want[i].Protocol, + ) + } + } + }) + } +} + +func TestBuildMultiOrchServicePorts(t *testing.T) { + tests := []struct { + name string + want []corev1.ServicePort + }{ + { + name: "returns correct service ports", + want: []corev1.ServicePort{ + { + Name: "http", + Port: DefaultMultiOrchHTTPPort, + TargetPort: intstr.FromString("http"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "grpc", + Port: DefaultMultiOrchGRPCPort, + TargetPort: intstr.FromString("grpc"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildMultiOrchServicePorts() + + if len(got) != len(tt.want) { + t.Errorf( + "buildMultiOrchServicePorts() length = %d, want %d", + len(got), + len(tt.want), + ) + return + } + + for i, port := range got { + if port.Name != tt.want[i].Name { + t.Errorf("port[%d].Name = %s, want %s", i, port.Name, tt.want[i].Name) + } + if port.Port != tt.want[i].Port { + t.Errorf("port[%d].Port = %d, want %d", i, port.Port, tt.want[i].Port) + } + if port.TargetPort != tt.want[i].TargetPort { + t.Errorf( + "port[%d].TargetPort = %v, want %v", + i, + port.TargetPort, + tt.want[i].TargetPort, + ) + } + if port.Protocol != tt.want[i].Protocol { + t.Errorf( + "port[%d].Protocol = %s, want %s", + i, + port.Protocol, + tt.want[i].Protocol, + ) + } + } + }) + } +} diff --git a/pkg/resource-handler/controller/shard/shard_controller.go b/pkg/resource-handler/controller/shard/shard_controller.go new file mode 100644 index 00000000..6e150d82 --- /dev/null +++ b/pkg/resource-handler/controller/shard/shard_controller.go @@ -0,0 +1,375 @@ +package shard + +import ( + "context" + "fmt" + "slices" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +const ( + finalizerName = "shard.multigres.com/finalizer" +) + +// ShardReconciler reconciles a Shard object. +type ShardReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// Reconcile handles Shard resource reconciliation. 
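+// On each pass it fetches the Shard, handles deletion via the finalizer, ensures the
+// finalizer is present, reconciles the MultiOrch Deployment and Service, then a
+// StatefulSet and headless Service per pool, and finally updates status.
+//
+// A minimal sketch of how the reconciler might be wired into a manager (the actual
+// wiring lives outside this file):
+//
+//	r := &ShardReconciler{Client: mgr.GetClient(), Scheme: mgr.GetScheme()}
+//	if err := r.SetupWithManager(mgr); err != nil {
+//		// handle setup error
+//	}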
+func (r *ShardReconciler) Reconcile( + ctx context.Context, + req ctrl.Request, +) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Fetch the Shard instance + shard := &multigresv1alpha1.Shard{} + if err := r.Get(ctx, req.NamespacedName, shard); err != nil { + if errors.IsNotFound(err) { + logger.Info("Shard resource not found, ignoring") + return ctrl.Result{}, nil + } + logger.Error(err, "Failed to get Shard") + return ctrl.Result{}, err + } + + // Handle deletion + if !shard.DeletionTimestamp.IsZero() { + return r.handleDeletion(ctx, shard) + } + + // Add finalizer if not present + if !slices.Contains(shard.Finalizers, finalizerName) { + shard.Finalizers = append(shard.Finalizers, finalizerName) + if err := r.Update(ctx, shard); err != nil { + logger.Error(err, "Failed to add finalizer") + return ctrl.Result{}, err + } + } + + // Reconcile MultiOrch Deployment + if err := r.reconcileMultiOrchDeployment(ctx, shard); err != nil { + logger.Error(err, "Failed to reconcile MultiOrch Deployment") + return ctrl.Result{}, err + } + + // Reconcile MultiOrch Service + if err := r.reconcileMultiOrchService(ctx, shard); err != nil { + logger.Error(err, "Failed to reconcile MultiOrch Service") + return ctrl.Result{}, err + } + + // Reconcile each pool + for poolName, pool := range shard.Spec.Pools { + if err := r.reconcilePool(ctx, shard, poolName, pool); err != nil { + logger.Error(err, "Failed to reconcile pool", "poolName", poolName) + return ctrl.Result{}, err + } + } + + // Update status + if err := r.updateStatus(ctx, shard); err != nil { + logger.Error(err, "Failed to update status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// handleDeletion handles cleanup when Shard is being deleted. +func (r *ShardReconciler) handleDeletion( + ctx context.Context, + shard *multigresv1alpha1.Shard, +) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + if slices.Contains(shard.Finalizers, finalizerName) { + // Perform cleanup if needed + // Currently no special cleanup required - owner references handle resource deletion + + // Remove finalizer + shard.Finalizers = slices.DeleteFunc(shard.Finalizers, func(s string) bool { + return s == finalizerName + }) + if err := r.Update(ctx, shard); err != nil { + logger.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +// reconcileMultiOrchDeployment creates or updates the MultiOrch Deployment. +func (r *ShardReconciler) reconcileMultiOrchDeployment( + ctx context.Context, + shard *multigresv1alpha1.Shard, +) error { + desired, err := BuildMultiOrchDeployment(shard, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build MultiOrch Deployment: %w", err) + } + + existing := &appsv1.Deployment{} + err = r.Get( + ctx, + client.ObjectKey{Namespace: shard.Namespace, Name: desired.Name}, + existing, + ) + if err != nil { + if errors.IsNotFound(err) { + // Create new Deployment + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create MultiOrch Deployment: %w", err) + } + return nil + } + return fmt.Errorf("failed to get MultiOrch Deployment: %w", err) + } + + // Update existing Deployment + existing.Spec = desired.Spec + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update MultiOrch Deployment: %w", err) + } + + return nil +} + +// reconcileMultiOrchService creates or updates the MultiOrch Service. 
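+// Only Spec.Ports, Spec.Selector, and Labels are copied onto an existing Service,
+// leaving cluster-managed fields such as the allocated ClusterIP untouched.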
+func (r *ShardReconciler) reconcileMultiOrchService( + ctx context.Context, + shard *multigresv1alpha1.Shard, +) error { + desired, err := BuildMultiOrchService(shard, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build MultiOrch Service: %w", err) + } + + existing := &corev1.Service{} + err = r.Get( + ctx, + client.ObjectKey{Namespace: shard.Namespace, Name: desired.Name}, + existing, + ) + if err != nil { + if errors.IsNotFound(err) { + // Create new Service + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create MultiOrch Service: %w", err) + } + return nil + } + return fmt.Errorf("failed to get MultiOrch Service: %w", err) + } + + // Update existing Service + existing.Spec.Ports = desired.Spec.Ports + existing.Spec.Selector = desired.Spec.Selector + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update MultiOrch Service: %w", err) + } + + return nil +} + +// reconcilePool creates or updates the StatefulSet and headless Service for a pool. +func (r *ShardReconciler) reconcilePool( + ctx context.Context, + shard *multigresv1alpha1.Shard, + poolName string, + poolSpec multigresv1alpha1.ShardPoolSpec, +) error { + // Reconcile pool StatefulSet + if err := r.reconcilePoolStatefulSet(ctx, shard, poolName, poolSpec); err != nil { + return fmt.Errorf("failed to reconcile pool StatefulSet: %w", err) + } + + // Reconcile pool headless Service + if err := r.reconcilePoolHeadlessService(ctx, shard, poolName, poolSpec); err != nil { + return fmt.Errorf("failed to reconcile pool headless Service: %w", err) + } + + return nil +} + +// reconcilePoolStatefulSet creates or updates the StatefulSet for a pool. +func (r *ShardReconciler) reconcilePoolStatefulSet( + ctx context.Context, + shard *multigresv1alpha1.Shard, + poolName string, + poolSpec multigresv1alpha1.ShardPoolSpec, +) error { + desired, err := BuildPoolStatefulSet(shard, poolName, poolSpec, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build pool StatefulSet: %w", err) + } + + existing := &appsv1.StatefulSet{} + err = r.Get( + ctx, + client.ObjectKey{Namespace: shard.Namespace, Name: desired.Name}, + existing, + ) + if err != nil { + if errors.IsNotFound(err) { + // Create new StatefulSet + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create pool StatefulSet: %w", err) + } + return nil + } + return fmt.Errorf("failed to get pool StatefulSet: %w", err) + } + + // Update existing StatefulSet + existing.Spec = desired.Spec + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update pool StatefulSet: %w", err) + } + + return nil +} + +// reconcilePoolHeadlessService creates or updates the headless Service for a pool. 
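+// The Service name and selector come from BuildPoolHeadlessService; the tests below
+// exercise the "<shard>-pool-<pool>-headless" naming convention.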
+func (r *ShardReconciler) reconcilePoolHeadlessService( + ctx context.Context, + shard *multigresv1alpha1.Shard, + poolName string, + poolSpec multigresv1alpha1.ShardPoolSpec, +) error { + desired, err := BuildPoolHeadlessService(shard, poolName, poolSpec, r.Scheme) + if err != nil { + return fmt.Errorf("failed to build pool headless Service: %w", err) + } + + existing := &corev1.Service{} + err = r.Get( + ctx, + client.ObjectKey{Namespace: shard.Namespace, Name: desired.Name}, + existing, + ) + if err != nil { + if errors.IsNotFound(err) { + // Create new Service + if err := r.Create(ctx, desired); err != nil { + return fmt.Errorf("failed to create pool headless Service: %w", err) + } + return nil + } + return fmt.Errorf("failed to get pool headless Service: %w", err) + } + + // Update existing Service + existing.Spec.Ports = desired.Spec.Ports + existing.Spec.Selector = desired.Spec.Selector + existing.Labels = desired.Labels + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update pool headless Service: %w", err) + } + + return nil +} + +// updateStatus updates the Shard status based on observed state. +func (r *ShardReconciler) updateStatus( + ctx context.Context, + shard *multigresv1alpha1.Shard, +) error { + var totalPods, readyPods int32 + + // Aggregate status from all pool StatefulSets + for poolName := range shard.Spec.Pools { + stsName := buildPoolName(shard.Name, poolName) + sts := &appsv1.StatefulSet{} + err := r.Get( + ctx, + client.ObjectKey{Namespace: shard.Namespace, Name: stsName}, + sts, + ) + if err != nil { + if errors.IsNotFound(err) { + // StatefulSet not created yet, skip + continue + } + return fmt.Errorf("failed to get pool StatefulSet for status: %w", err) + } + + totalPods += sts.Status.Replicas + readyPods += sts.Status.ReadyReplicas + } + + // Update status fields + shard.Status.TotalPods = totalPods + shard.Status.ReadyPods = readyPods + shard.Status.ObservedGeneration = shard.Generation + + // Update conditions + shard.Status.Conditions = r.buildConditions(shard, totalPods, readyPods) + + if err := r.Status().Update(ctx, shard); err != nil { + return fmt.Errorf("failed to update status: %w", err) + } + + return nil +} + +// buildConditions creates status conditions based on observed state. +func (r *ShardReconciler) buildConditions( + shard *multigresv1alpha1.Shard, + totalPods, readyPods int32, +) []metav1.Condition { + conditions := []metav1.Condition{} + + // Available condition + availableCondition := metav1.Condition{ + Type: "Available", + ObservedGeneration: shard.Generation, + LastTransitionTime: metav1.Now(), + } + + if readyPods == totalPods && totalPods > 0 { + availableCondition.Status = metav1.ConditionTrue + availableCondition.Reason = "AllPodsReady" + availableCondition.Message = fmt.Sprintf("All %d pods are ready", readyPods) + } else { + availableCondition.Status = metav1.ConditionFalse + availableCondition.Reason = "NotAllPodsReady" + availableCondition.Message = fmt.Sprintf("%d/%d pods ready", readyPods, totalPods) + } + + conditions = append(conditions, availableCondition) + return conditions +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ShardReconciler) SetupWithManager(mgr ctrl.Manager, opts ...controller.Options) error { + controllerOpts := controller.Options{} + if len(opts) > 0 { + controllerOpts = opts[0] + } + + return ctrl.NewControllerManagedBy(mgr). + For(&multigresv1alpha1.Shard{}). + Owns(&appsv1.Deployment{}). + Owns(&appsv1.StatefulSet{}). 
+ Owns(&corev1.Service{}). + WithOptions(controllerOpts). + Complete(r) +} diff --git a/pkg/resource-handler/controller/shard/shard_controller_internal_test.go b/pkg/resource-handler/controller/shard/shard_controller_internal_test.go new file mode 100644 index 00000000..5d4f64fa --- /dev/null +++ b/pkg/resource-handler/controller/shard/shard_controller_internal_test.go @@ -0,0 +1,511 @@ +package shard + +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func TestBuildConditions(t *testing.T) { + tests := map[string]struct { + generation int64 + totalPods int32 + readyPods int32 + want []metav1.Condition + }{ + "all pods ready": { + generation: 5, + totalPods: 3, + readyPods: 3, + want: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "AllPodsReady", + Message: "All 3 pods are ready", + ObservedGeneration: 5, + }, + }, + }, + "partial pods ready": { + generation: 10, + totalPods: 5, + readyPods: 2, + want: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionFalse, + Reason: "NotAllPodsReady", + Message: "2/5 pods ready", + ObservedGeneration: 10, + }, + }, + }, + "no pods": { + generation: 1, + totalPods: 0, + readyPods: 0, + want: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionFalse, + Reason: "NotAllPodsReady", + Message: "0/0 pods ready", + ObservedGeneration: 1, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{Generation: tc.generation}, + } + r := &ShardReconciler{} + got := r.buildConditions(shard, tc.totalPods, tc.readyPods) + + // Use go-cmp for exact match, ignoring LastTransitionTime + opts := cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime") + if diff := cmp.Diff(tc.want, got, opts); diff != "" { + t.Errorf("buildConditions() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +// TestBuildMultiOrchContainer_WithImage tests buildMultiOrchContainer with custom image. +// This tests the image override path that was missing coverage. +func TestBuildMultiOrchContainer_WithImage(t *testing.T) { + customImage := "custom/multiorch:v1.2.3" + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Image: customImage, + }, + }, + } + + container := buildMultiOrchContainer(shard) + + if container.Image != customImage { + t.Errorf("buildMultiOrchContainer() image = %s, want %s", container.Image, customImage) + } + if container.Name != "multiorch" { + t.Errorf("buildMultiOrchContainer() name = %s, want multiorch", container.Name) + } +} + +// TestReconcileMultiOrchDeployment_InvalidScheme tests the error path when BuildMultiOrchDeployment fails. +// This should never happen in production - scheme is properly set up in main.go. +// Test exists for coverage of defensive error handling. 
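+// The same pattern is reused below for the MultiOrch Service, pool StatefulSet, and
+// pool headless Service builders: an empty runtime.Scheme without the Shard type
+// registered is expected to make the corresponding Build* call fail.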
+func TestReconcileMultiOrchDeployment_InvalidScheme(t *testing.T) { + // Empty scheme without Shard type registered + invalidScheme := runtime.NewScheme() + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"cell1"}, + }, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileMultiOrchDeployment(context.Background(), shard) + if err == nil { + t.Error("reconcileMultiOrchDeployment() should error with invalid scheme") + } +} + +// TestReconcileMultiOrchService_InvalidScheme tests the error path when BuildMultiOrchService fails. +func TestReconcileMultiOrchService_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcileMultiOrchService(context.Background(), shard) + if err == nil { + t.Error("reconcileMultiOrchService() should error with invalid scheme") + } +} + +// TestReconcilePoolStatefulSet_InvalidScheme tests the error path when BuildPoolStatefulSet fails. +func TestReconcilePoolStatefulSet_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + } + + poolName := "pool1" + poolSpec := multigresv1alpha1.ShardPoolSpec{ + Cell: "cell1", + Database: "db1", + TableGroup: "tg1", + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcilePoolStatefulSet(context.Background(), shard, poolName, poolSpec) + if err == nil { + t.Error("reconcilePoolStatefulSet() should error with invalid scheme") + } +} + +// TestReconcilePoolHeadlessService_InvalidScheme tests the error path when BuildPoolHeadlessService fails. +func TestReconcilePoolHeadlessService_InvalidScheme(t *testing.T) { + invalidScheme := runtime.NewScheme() + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + } + + poolName := "pool1" + poolSpec := multigresv1alpha1.ShardPoolSpec{ + Cell: "cell1", + Database: "db1", + TableGroup: "tg1", + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(invalidScheme). + Build() + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: invalidScheme, + } + + err := reconciler.reconcilePoolHeadlessService(context.Background(), shard, poolName, poolSpec) + if err == nil { + t.Error("reconcilePoolHeadlessService() should error with invalid scheme") + } +} + +// TestUpdateStatus_PoolStatefulSetNotFound tests the NotFound path in updateStatus. 
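+// The pool's StatefulSet is intentionally absent, so updateStatus should skip the
+// missing pool and still succeed.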
+func TestUpdateStatus_PoolStatefulSetNotFound(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) // Need StatefulSet type registered for Get to work + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "pool1": { + Cell: "cell1", + Database: "db1", + TableGroup: "tg1", + }, + }, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(shard). + WithStatusSubresource(&multigresv1alpha1.Shard{}). + Build() + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Call updateStatus when pool StatefulSet doesn't exist yet + err := reconciler.updateStatus(context.Background(), shard) + if err != nil { + t.Errorf("updateStatus() should not error when pool StatefulSet not found, got: %v", err) + } +} + +// TestHandleDeletion_NoFinalizer tests early return when no finalizer is present. +func TestHandleDeletion_NoFinalizer(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{}, // No finalizer + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(shard). + Build() + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + result, err := reconciler.handleDeletion(context.Background(), shard) + if err != nil { + t.Errorf("handleDeletion() should not error when no finalizer, got: %v", err) + } + if result.RequeueAfter > 0 { + t.Error("handleDeletion() should not requeue when no finalizer") + } +} + +// TestReconcileMultiOrchDeployment_GetError tests error path on Get MultiOrch Deployment (not NotFound). +func TestReconcileMultiOrchDeployment_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"cell1"}, + }, + }, + } + + // Create client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(shard). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-shard-multiorch", testutil.ErrNetworkTimeout), + }) + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcileMultiOrchDeployment(context.Background(), shard) + if err == nil { + t.Error("reconcileMultiOrchDeployment() should error on Get failure") + } +} + +// TestReconcileMultiOrchService_GetError tests error path on Get MultiOrch Service (not NotFound). +func TestReconcileMultiOrchService_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + } + + // Create client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(shard). 
+ Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-shard-multiorch", testutil.ErrNetworkTimeout), + }) + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcileMultiOrchService(context.Background(), shard) + if err == nil { + t.Error("reconcileMultiOrchService() should error on Get failure") + } +} + +// TestReconcilePoolStatefulSet_GetError tests error path on Get pool StatefulSet (not NotFound). +func TestReconcilePoolStatefulSet_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + } + + poolName := "pool1" + poolSpec := multigresv1alpha1.ShardPoolSpec{ + Cell: "cell1", + Database: "db1", + TableGroup: "tg1", + } + + // Create client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(shard). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-shard-pool-pool1", testutil.ErrNetworkTimeout), + }) + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcilePoolStatefulSet(context.Background(), shard, poolName, poolSpec) + if err == nil { + t.Error("reconcilePoolStatefulSet() should error on Get failure") + } +} + +// TestReconcilePoolHeadlessService_GetError tests error path on Get pool headless Service (not NotFound). +func TestReconcilePoolHeadlessService_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + } + + poolName := "pool1" + poolSpec := multigresv1alpha1.ShardPoolSpec{ + Cell: "cell1", + Database: "db1", + TableGroup: "tg1", + } + + // Create client with failure injection + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(shard). + Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-shard-pool-pool1-headless", testutil.ErrNetworkTimeout), + }) + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.reconcilePoolHeadlessService(context.Background(), shard, poolName, poolSpec) + if err == nil { + t.Error("reconcilePoolHeadlessService() should error on Get failure") + } +} + +// TestUpdateStatus_GetError tests error path on Get pool StatefulSet (not NotFound). +func TestUpdateStatus_GetError(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + + shard := &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "pool1": { + Cell: "cell1", + Database: "db1", + TableGroup: "tg1", + }, + }, + }, + } + + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(shard). + WithStatusSubresource(&multigresv1alpha1.Shard{}). 
+ Build() + + fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-shard-pool-pool1", testutil.ErrNetworkTimeout), + }) + + reconciler := &ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + err := reconciler.updateStatus(context.Background(), shard) + if err == nil { + t.Error("updateStatus() should error on Get failure") + } +} diff --git a/pkg/resource-handler/controller/shard/shard_controller_test.go b/pkg/resource-handler/controller/shard/shard_controller_test.go new file mode 100644 index 00000000..0cc27058 --- /dev/null +++ b/pkg/resource-handler/controller/shard/shard_controller_test.go @@ -0,0 +1,1639 @@ +package shard_test + +import ( + "slices" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/shard" + "github.com/numtide/multigres-operator/pkg/testutil" +) + +func TestShardReconciler_Reconcile(t *testing.T) { + t.Parallel() + + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + tests := map[string]struct { + shard *multigresv1alpha1.Shard + existingObjects []client.Object + failureConfig *testutil.FailureConfig + wantErr bool + assertFunc func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) + }{ + ////---------------------------------------- + /// Success + //------------------------------------------ + "create all resources for new Shard with single pool": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(1)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) { + // Verify MultiOrch Deployment was created + moDeploy := &appsv1.Deployment{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-multiorch", Namespace: "default"}, + moDeploy); err != nil { + t.Errorf("MultiOrch Deployment should exist: %v", err) + } + + // Verify MultiOrch Service was created + moSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-multiorch", Namespace: "default"}, + moSvc); err != nil { + t.Errorf("MultiOrch Service should exist: %v", err) + } + + // Verify Pool StatefulSet was created + poolSts := &appsv1.StatefulSet{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-pool-primary", Namespace: "default"}, + 
poolSts); err != nil { + t.Errorf("Pool StatefulSet should exist: %v", err) + } + + // Verify Pool headless Service was created + poolSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-pool-primary-headless", Namespace: "default"}, + poolSvc); err != nil { + t.Errorf("Pool headless Service should exist: %v", err) + } + + // Verify finalizer was added + updatedShard := &multigresv1alpha1.Shard{} + if err := c.Get(t.Context(), types.NamespacedName{Name: "test-shard", Namespace: "default"}, updatedShard); err != nil { + t.Fatalf("Failed to get Shard: %v", err) + } + if !slices.Contains(updatedShard.Finalizers, "shard.multigres.com/finalizer") { + t.Errorf("Finalizer should be added") + } + }, + }, + "create resources for Shard with multiple pools": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-pool-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "replica": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(2)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + "readOnly": { + Cell: "zone1", + Type: "readOnly", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(3)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("5Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + assertFunc: func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) { + // Verify replica pool StatefulSet + replicaSts := &appsv1.StatefulSet{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "multi-pool-shard-pool-replica", Namespace: "default"}, + replicaSts); err != nil { + t.Errorf("Replica pool StatefulSet should exist: %v", err) + } else if *replicaSts.Spec.Replicas != 2 { + t.Errorf("Replica pool replicas = %d, want 2", *replicaSts.Spec.Replicas) + } + + // Verify readOnly pool StatefulSet + readOnlySts := &appsv1.StatefulSet{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "multi-pool-shard-pool-readOnly", Namespace: "default"}, + readOnlySts); err != nil { + t.Errorf("ReadOnly pool StatefulSet should exist: %v", err) + } else if *readOnlySts.Spec.Replicas != 3 { + t.Errorf("ReadOnly pool replicas = %d, want 3", *readOnlySts.Spec.Replicas) + } + + // Verify both headless services + replicaSvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "multi-pool-shard-pool-replica-headless", Namespace: "default"}, + replicaSvc); err != nil { + t.Errorf("Replica pool headless Service should exist: %v", err) + } + + readOnlySvc := &corev1.Service{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "multi-pool-shard-pool-readOnly-headless", Namespace: "default"}, + readOnlySvc); err != nil { + t.Errorf("ReadOnly pool headless Service should exist: %v", err) + } + }, + }, + "update existing resources": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "existing-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Images: multigresv1alpha1.ShardImagesSpec{ + MultiPooler: "custom/multipooler:v1.0.0", + Postgres: "postgres:16", + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(5)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("20Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-shard-multiorch", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + }, + Status: appsv1.DeploymentStatus{ + Replicas: 2, + ReadyReplicas: 2, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-shard-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-shard-pool-primary", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(int32(2)), // will be updated to 5 + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 2, + ReadyReplicas: 2, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-shard-pool-primary-headless", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) { + poolSts := &appsv1.StatefulSet{} + err := c.Get(t.Context(), types.NamespacedName{ + Name: "existing-shard-pool-primary", + Namespace: "default", + }, poolSts) + if err != nil { + t.Fatalf("Failed to get Pool StatefulSet: %v", err) + } + + if *poolSts.Spec.Replicas != 5 { + t.Errorf("Pool StatefulSet replicas = %d, want 5", *poolSts.Spec.Replicas) + } + }, + }, + "deletion with finalizer": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-deletion", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: 
"default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) { + updatedShard := &multigresv1alpha1.Shard{} + err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-deletion", Namespace: "default"}, + updatedShard) + if err == nil { + t.Errorf( + "Shard object should be deleted but still exists (finalizers: %v)", + updatedShard.Finalizers, + ) + } + }, + }, + "all replicas ready status": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-ready", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(3)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-ready-multiorch", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-ready-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-ready-pool-primary", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(int32(3)), + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-ready-pool-primary-headless", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) { + updatedShard := &multigresv1alpha1.Shard{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-ready", Namespace: "default"}, + updatedShard); err != nil { + t.Fatalf("Failed to get Shard: %v", err) + } + + if len(updatedShard.Status.Conditions) == 0 { + t.Error("Status.Conditions should not be empty") + } else { + availableCondition := updatedShard.Status.Conditions[0] + if availableCondition.Type != "Available" { + t.Errorf("Condition type = %s, want Available", availableCondition.Type) + } + if availableCondition.Status != metav1.ConditionTrue { + t.Errorf("Condition status = %s, want True", availableCondition.Status) + } + if availableCondition.Reason != "AllPodsReady" { + t.Errorf("Condition reason = %s, want AllPodsReady", availableCondition.Reason) + } + } + + if updatedShard.Status.TotalPods != 3 { + t.Errorf("TotalPods = %d, want 3", updatedShard.Status.TotalPods) + } + if updatedShard.Status.ReadyPods != 3 { + t.Errorf("ReadyPods = %d, want 3", updatedShard.Status.ReadyPods) + } + }, + }, + "not ready status - partial replicas": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-shard-partial", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(5)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-partial-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-partial-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-partial-pool-primary", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(int32(5)), + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 5, + ReadyReplicas: 3, // only 3 out of 5 ready + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-partial-pool-primary-headless", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) { + updatedShard := &multigresv1alpha1.Shard{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-partial", Namespace: "default"}, + updatedShard); err != nil { + t.Fatalf("Failed to get Shard: %v", err) + } + + if len(updatedShard.Status.Conditions) == 0 { + t.Fatal("Status.Conditions should not be empty") + } + + availableCondition := updatedShard.Status.Conditions[0] + if availableCondition.Type != "Available" { + t.Errorf("Condition type = %s, want Available", availableCondition.Type) + } + if availableCondition.Status != metav1.ConditionFalse { + t.Errorf("Condition status = %s, want False", availableCondition.Status) + } + if availableCondition.Reason != "NotAllPodsReady" { + t.Errorf( + "Condition reason = %s, want NotAllPodsReady", + availableCondition.Reason, + ) + } + + if updatedShard.Status.TotalPods != 5 { + t.Errorf("TotalPods = %d, want 5", updatedShard.Status.TotalPods) + } + if updatedShard.Status.ReadyPods != 3 { + t.Errorf("ReadyPods = %d, want 3", updatedShard.Status.ReadyPods) + } + }, + }, + "status with multiple pools": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multi", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "replica": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(2)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + "readOnly": { + Cell: "zone1", + Type: "readOnly", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(3)), + DataVolumeClaimTemplate: 
corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("5Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multi-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multi-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multi-pool-replica", + Namespace: "default", + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 2, + ReadyReplicas: 2, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multi-pool-replica-headless", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multi-pool-readOnly", + Namespace: "default", + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multi-pool-readOnly-headless", + Namespace: "default", + }, + }, + }, + assertFunc: func(t *testing.T, c client.Client, shard *multigresv1alpha1.Shard) { + updatedShard := &multigresv1alpha1.Shard{} + if err := c.Get(t.Context(), + types.NamespacedName{Name: "test-shard-multi", Namespace: "default"}, + updatedShard); err != nil { + t.Fatalf("Failed to get Shard: %v", err) + } + + // Total should be 2 + 3 = 5 + if updatedShard.Status.TotalPods != 5 { + t.Errorf("TotalPods = %d, want 5", updatedShard.Status.TotalPods) + } + if updatedShard.Status.ReadyPods != 5 { + t.Errorf("ReadyPods = %d, want 5", updatedShard.Status.ReadyPods) + } + }, + }, + ////---------------------------------------- + /// Error + //------------------------------------------ + "error on status update": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnStatusUpdate: testutil.FailOnObjectName("test-shard", testutil.ErrInjected), + }, + wantErr: true, + }, + "error on MultiOrch Deployment create": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: 
resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if deploy, ok := obj.(*appsv1.Deployment); ok && + deploy.Name == "test-shard-multiorch" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on MultiOrch Deployment Update": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if deploy, ok := obj.(*appsv1.Deployment); ok && + deploy.Name == "test-shard-multiorch" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get MultiOrch Deployment (network error)": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + if key.Name == "test-shard-multiorch" { + return testutil.ErrNetworkTimeout + } + return nil + }, + }, + wantErr: true, + }, + "error on MultiOrch Service create": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj 
client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-shard-multiorch" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on MultiOrch Service Update": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-shard-multiorch" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get MultiOrch Service (network error)": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-svc", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-svc-multiorch", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnNamespacedKeyName( + "test-shard-svc-multiorch", + "default", + testutil.ErrNetworkTimeout, + ), + }, + wantErr: true, + }, + "error on Pool StatefulSet create": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if sts, 
ok := obj.(*appsv1.StatefulSet); ok && + sts.Name == "test-shard-pool-primary" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on Pool StatefulSet Update": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + Replicas: ptr.To(int32(5)), + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(int32(2)), + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if sts, ok := obj.(*appsv1.StatefulSet); ok && + sts.Name == "test-shard-pool-primary" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get Pool StatefulSet (network error)": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + if key.Name == "test-shard-pool-primary" { + return testutil.ErrNetworkTimeout + } + return nil + }, + }, + wantErr: true, + }, + "error on Pool Service create": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: 
[]corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnCreate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && + svc.Name == "test-shard-pool-primary-headless" { + return testutil.ErrPermissionError + } + return nil + }, + }, + wantErr: true, + }, + "error on Pool Service Update": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary-headless", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: func(obj client.Object) error { + if svc, ok := obj.(*corev1.Service); ok && + svc.Name == "test-shard-pool-primary-headless" { + return testutil.ErrInjected + } + return nil + }, + }, + wantErr: true, + }, + "error on Get Pool Service (network error)": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-pool-primary", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnGet: func(key client.ObjectKey) error { + if key.Name == "test-shard-pool-primary-headless" && + key.Namespace == "default" { + return testutil.ErrNetworkTimeout + } + 
return nil + }, + }, + wantErr: true, + }, + "error on finalizer Update": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-shard", testutil.ErrInjected), + }, + wantErr: true, + }, + "deletion error on finalizer removal": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-del", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + OnUpdate: testutil.FailOnObjectName("test-shard-del", testutil.ErrInjected), + }, + wantErr: true, + }, + "error on Get Shard (network error)": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard", + Namespace: "default", + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, 
+ }, + existingObjects: []client.Object{}, + failureConfig: &testutil.FailureConfig{ + OnGet: testutil.FailOnKeyName("test-shard", testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + "error on Get Pool StatefulSet in updateStatus (network error)": { + shard: &multigresv1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-status", + Namespace: "default", + Finalizers: []string{"shard.multigres.com/finalizer"}, + }, + Spec: multigresv1alpha1.ShardSpec{ + MultiOrch: multigresv1alpha1.MultiOrchSpec{ + Cells: []string{"zone1"}, + }, + Pools: map[string]multigresv1alpha1.ShardPoolSpec{ + "primary": { + Cell: "zone1", + Type: "replica", + Database: "testdb", + TableGroup: "default", + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + }, + }, + }, + }, + }, + }, + existingObjects: []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-status-multiorch", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-status-multiorch", + Namespace: "default", + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-status-pool-primary", + Namespace: "default", + }, + }, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-shard-status-pool-primary-headless", + Namespace: "default", + }, + }, + }, + failureConfig: &testutil.FailureConfig{ + // Fail Pool StatefulSet Get after successful reconciliation calls + // Get calls: 1=Shard, 2=MultiOrchDeploy, 3=MultiOrchSvc, 4=PoolSts, 5=PoolSvc, 6=PoolSts(status) + OnGet: testutil.FailKeyAfterNCalls(5, testutil.ErrNetworkTimeout), + }, + wantErr: true, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Create base fake client + baseClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tc.existingObjects...). + WithStatusSubresource(&multigresv1alpha1.Shard{}). + Build() + + fakeClient := client.Client(baseClient) + // Wrap with failure injection if configured + if tc.failureConfig != nil { + fakeClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) + } + + reconciler := &shard.ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Create the Shard resource if not in existing objects + shardInExisting := false + for _, obj := range tc.existingObjects { + if shard, ok := obj.(*multigresv1alpha1.Shard); ok && shard.Name == tc.shard.Name { + shardInExisting = true + break + } + } + if !shardInExisting { + err := fakeClient.Create(t.Context(), tc.shard) + if err != nil { + t.Fatalf("Failed to create Shard: %v", err) + } + } + + // Reconcile + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.shard.Name, + Namespace: tc.shard.Namespace, + }, + } + + result, err := reconciler.Reconcile(t.Context(), req) + if (err != nil) != tc.wantErr { + t.Errorf("Reconcile() error = %v, wantErr %v", err, tc.wantErr) + return + } + if tc.wantErr { + return + } + + // NOTE: Check for requeue delay when we need to support such setup. 
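// A minimal sketch of what that requeue check could look like, assuming a
// hypothetical wantRequeueAfter field were added to the test table (it is not
// part of the current cases):
//
//	if tc.wantRequeueAfter > 0 && result.RequeueAfter != tc.wantRequeueAfter {
//		t.Errorf("Reconcile() RequeueAfter = %v, want %v",
//			result.RequeueAfter, tc.wantRequeueAfter)
//	}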
+ _ = result + + // Run custom assertions if provided + if tc.assertFunc != nil { + tc.assertFunc(t, fakeClient, tc.shard) + } + }) + } +} + +func TestShardReconciler_ReconcileNotFound(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + reconciler := &shard.ShardReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + // Reconcile non-existent resource + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "nonexistent-shard", + Namespace: "default", + }, + } + + result, err := reconciler.Reconcile(t.Context(), req) + if err != nil { + t.Errorf("Reconcile() should not error on NotFound, got: %v", err) + } + if result.RequeueAfter > 0 { + t.Errorf("Reconcile() should not requeue on NotFound") + } +} diff --git a/pkg/resource-handler/controller/testutil/compare.go b/pkg/resource-handler/controller/testutil/compare.go deleted file mode 100644 index 936ce3d4..00000000 --- a/pkg/resource-handler/controller/testutil/compare.go +++ /dev/null @@ -1,208 +0,0 @@ -package testutil - -import ( - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// IgnoreMetaRuntimeFields returns cmp.Options that ignore runtime-generated -// Kubernetes fields. Use this when comparing expected vs actual Kubernetes -// resources in tests. -func IgnoreMetaRuntimeFields() cmp.Options { - return cmp.Options{ - // Ignore ObjectMeta fields that are set by the API server - cmpopts.IgnoreFields(metav1.ObjectMeta{}, - "UID", // Generated by API server - "ResourceVersion", // Changes on every update - "Generation", // Auto-incremented - "CreationTimestamp", // Set at creation time - "DeletionTimestamp", // Set when deletion starts - "DeletionGracePeriodSeconds", - "ManagedFields", // Server-side apply tracking - "SelfLink", // Deprecated but may exist - ), - - // Ignore TypeMeta fields (often empty in client responses) - // NOTE: This is commented out as this has no impact in most cases. - // Consider removing this altogether. - // cmpopts.IgnoreFields(metav1.TypeMeta{}, - // "Kind", - // "APIVersion", - // ), - - // Ignore Status subresource (tested separately) - cmpopts.IgnoreFields(metav1.Condition{}, - "LastTransitionTime", // Time-based, always different - ), - - // Ignore time.Time fields (always different) - cmpopts.IgnoreTypes(time.Time{}), - cmpopts.IgnoreTypes(metav1.Time{}), - - // Ignore UID type - cmpopts.IgnoreTypes(types.UID("")), - } -} - -// IgnoreServiceRuntimeFields ignores Service fields that are assigned at -// runtime by Kubernetes. -func IgnoreServiceRuntimeFields() cmp.Option { - return cmpopts.IgnoreFields(corev1.ServiceSpec{}, - // Randomly allocated by Kubernetes from service CIDR range - "ClusterIP", - "ClusterIPs", - - // Derived from cluster IP stack configuration (IPv4, IPv6, or dual-stack) - "IPFamilies", - "IPFamilyPolicy", - - // Default policy set based on cluster configuration - "InternalTrafficPolicy", - - // Defaults to "None" - "SessionAffinity", - ) -} - -// IgnoreStatefulSetRuntimeFields ignores StatefulSet status fields that are -// updated at runtime by the StatefulSet controller. 
-func IgnoreStatefulSetRuntimeFields() cmp.Option { - return cmpopts.IgnoreFields(appsv1.StatefulSetStatus{}, - // Replica state counters (updated by controller based on pod status) - "Replicas", - "ReadyReplicas", - "CurrentReplicas", - "UpdatedReplicas", - "AvailableReplicas", - - // Revision tracking (generated hashes by controller for rolling updates) - "CurrentRevision", - "UpdateRevision", - - // Controller reconciliation metadata - "ObservedGeneration", // Tracks which spec generation was reconciled - "CollisionCount", // Counter for name collision avoidance - "Conditions", // Status conditions updated by controller - ) -} - -// IgnoreDeploymentRuntimeFields ignores Deployment status fields that are -// updated at runtime by the Deployment controller. -func IgnoreDeploymentRuntimeFields() cmp.Option { - return cmpopts.IgnoreFields(appsv1.DeploymentStatus{}, - // Replica state counters (updated by controller based on ReplicaSet status) - "Replicas", - "ReadyReplicas", - "AvailableReplicas", - "UnavailableReplicas", - "UpdatedReplicas", - - // Controller reconciliation metadata - "ObservedGeneration", // Tracks which spec generation was reconciled - "CollisionCount", // Counter for ReplicaSet name collision avoidance - "Conditions", // Status conditions updated by controller - ) -} - -// IgnorePodSpecDefaults ignores all PodSpec and Container defaults applied by -// Kubernetes, including ImagePullPolicy. Use when you only care about -// explicitly set fields. -func IgnorePodSpecDefaults() cmp.Option { - return cmp.Options{ - cmpopts.IgnoreFields(corev1.PodSpec{}, - // Pod lifecycle defaults - "RestartPolicy", // Defaults to "Always" - "TerminationGracePeriodSeconds", // Defaults to 30 - "DNSPolicy", // Defaults to "ClusterFirst" - "SchedulerName", // Defaults to "default-scheduler" - "SecurityContext", // Default PodSecurityContext applied - ), - cmpopts.IgnoreFields(corev1.Container{}, - // Container defaults - "TerminationMessagePath", // Defaults to "/dev/termination-log" - "TerminationMessagePolicy", // Defaults to "File" - "ImagePullPolicy", // Defaults: Always for :latest, IfNotPresent otherwise - ), - } -} - -// IgnorePodSpecDefaultsExceptPullPolicy ignores PodSpec defaults but preserves -// ImagePullPolicy for verification. Use when you want to assert ImagePullPolicy -// is correct. -func IgnorePodSpecDefaultsExceptPullPolicy() cmp.Option { - return cmp.Options{ - cmpopts.IgnoreFields(corev1.PodSpec{}, - // Pod lifecycle defaults - "RestartPolicy", // Defaults to "Always" - "TerminationGracePeriodSeconds", // Defaults to 30 - "DNSPolicy", // Defaults to "ClusterFirst" - "SchedulerName", // Defaults to "default-scheduler" - "SecurityContext", // Default PodSecurityContext applied - ), - cmpopts.IgnoreFields(corev1.Container{}, - // Container termination logging - "TerminationMessagePath", // Defaults to "/dev/termination-log" - "TerminationMessagePolicy", // Defaults to "File" - // Note: ImagePullPolicy NOT ignored - preserved for assertions - ), - } -} - -// IgnoreStatefulSetSpecDefaults ignores StatefulSetSpec fields that have -// Kubernetes defaults applied. 
-func IgnoreStatefulSetSpecDefaults() cmp.Option { - return cmpopts.IgnoreFields(appsv1.StatefulSetSpec{}, - // StatefulSet-specific defaults - "PodManagementPolicy", // Defaults to "OrderedReady" - "RevisionHistoryLimit", // Defaults to 10 - "UpdateStrategy", // Defaults to RollingUpdate with partition=0 - "PersistentVolumeClaimRetentionPolicy", // Defaults to Retain/Retain - ) -} - -// IgnoreDeploymentSpecDefaults ignores DeploymentSpec fields that have -// Kubernetes defaults applied. -func IgnoreDeploymentSpecDefaults() cmp.Option { - return cmpopts.IgnoreFields( - appsv1.DeploymentSpec{}, - // Deployment-specific defaults - "Strategy", // Defaults to RollingUpdate with MaxSurge=25%, MaxUnavailable=25% - "RevisionHistoryLimit", // Defaults to 10 - "ProgressDeadlineSeconds", // Defaults to 600 seconds (10 minutes) - ) -} - -// IgnoreObjectMetaCompletely ignores the entire ObjectMeta (use when you only -// care about Spec). -func IgnoreObjectMetaCompletely() cmp.Option { - return cmpopts.IgnoreFields(metav1.ObjectMeta{}) -} - -// IgnoreStatus ignores the Status subresource completely. -func IgnoreStatus() cmp.Option { - return cmpopts.IgnoreFields(struct{ Status any }{}, "Status") -} - -// CompareOptions returns common options for comparing Kubernetes objects. -// By default, ignores metadata and status. -func CompareOptions() cmp.Options { - return cmp.Options{ - IgnoreMetaRuntimeFields(), - IgnoreStatus(), - } -} - -// CompareSpecOnly returns options for comparing only Spec fields. -// Ignores all metadata and status. -func CompareSpecOnly() cmp.Options { - return cmp.Options{ - IgnoreObjectMetaCompletely(), - IgnoreStatus(), - } -} diff --git a/pkg/resource-handler/controller/testutil/envtest.go b/pkg/resource-handler/controller/testutil/envtest.go deleted file mode 100644 index 1320edd3..00000000 --- a/pkg/resource-handler/controller/testutil/envtest.go +++ /dev/null @@ -1,197 +0,0 @@ -package testutil - -import ( - "fmt" - "os" - "path/filepath" - "testing" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/manager" - metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" -) - -func SetUpEnvtest(t testing.TB) *rest.Config { - t.Helper() - - testEnv := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join( - // Go back up to repo root - "../../../../", - "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - cfg, err := testEnv.Start() - if err != nil { - t.Fatalf("Setting up with envtest failed, %v", err) - } - t.Cleanup(func() { - if err := testEnv.Stop(); err != nil { - t.Fatalf("Failed to stop envtest, %v", err) - } - }) - - return cfg -} - -// SetUpEnvtestWithKubeconfig starts Kubernetes API server for testing, and -// keeps it running for further debugging. -func SetUpEnvtestWithKubeconfig(t testing.TB) (*rest.Config, func()) { - t.Helper() - - testEnv := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join( - // Go back up to repo root - "../../../../", - "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - cfg, err := testEnv.Start() - if err != nil { - t.Fatalf("Setting up with envtest failed, %v", err) - } - // Purposely no clean-up to run so that the envtest setup can be debugged - // further. 
- - // Write kubeconfig to file - user, err := testEnv.ControlPlane.AddUser(envtest.User{ - Name: "envtest-admin", - Groups: []string{"system:masters"}, - }, nil) - if err != nil { - t.Fatalf("Failed to add user for testing, %v", err) - } - - kubeconfig, err := user.KubeConfig() - if err != nil { - t.Fatalf("Failed to generate kubeconfig, %v", err) - } - - kubeconfigPath := filepath.Join(os.TempDir(), "envtest-kubeconfig") - if err := os.WriteFile(kubeconfigPath, kubeconfig, 0o644); err != nil { - t.Fatalf("Failed to write kubeconfig to file, %v", err) - } - - t.Cleanup(func() { - fmt.Printf("Kubeconfig written to: %s\n", kubeconfigPath) - fmt.Printf("Connect with: export KUBECONFIG=%s\n", kubeconfigPath) - }) - - return cfg, func() { - if err := testEnv.Stop(); err != nil { - t.Fatalf("Failed to stop envtest, %v", err) - } - } -} - -// SetUpClient creates a direct Kubernetes client (non-cached). -// -// IMPORTANT: This creates a client that bypasses the manager's cache and reads -// directly from the API server. This is different from mgr.GetClient() which -// uses cached reads (same as controllers). -// -// When to use SetUpClient: -// -// 1. Testing cache synchronization: -// - Verify what's actually in the API server vs what the cache sees -// - Useful when debugging "why doesn't my controller see this resource?" -// -// 2. Strong consistency requirements: -// -// - Need immediate reads after writes (no cache lag) -// -// - Testing race conditions or timing-sensitive behavior -// -// 3. Comparing cached vs direct reads: -// directClient := SetUpClient(t, cfg, scheme) -// cachedClient := mgr.GetClient() -// // Create resource -// cachedClient.Create(ctx, obj) -// // Direct read (guaranteed to see it) -// directClient.Get(ctx, key, &actual) -// // Cached read (might lag slightly) -// cachedClient.Get(ctx, key, &fromCache) -// -// For most tests, use mgr.GetClient() instead - it tests what controllers actually see. -func SetUpClient(t testing.TB, cfg *rest.Config, scheme *runtime.Scheme) client.Client { - t.Helper() - - k8sClient, err := client.New(cfg, client.Options{Scheme: scheme}) - if err != nil { - t.Fatalf("Failed to setup a Kubernetes client: %v", err) - } - - return k8sClient -} - -func SetUpManager(t testing.TB, cfg *rest.Config, scheme *runtime.Scheme) manager.Manager { - t.Helper() - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme, - LeaderElection: false, - - // WebhookServer: webhook.NewServer(webhook.Options{ - // // Host: webhookInstallOptions.LocalServingHost, - // // Port: webhookInstallOptions.LocalServingPort, - // // CertDir: webhookInstallOptions.LocalServingCertDir, - // }), - Metrics: metricsserver.Options{ - BindAddress: "0", - }, - }) - if err != nil { - t.Fatalf("Failed to start manager: %v", err) - } - - return mgr -} - -func StartManager(t testing.TB, mgr manager.Manager) { - t.Helper() - - // t.Context gets cancelled before the test cleanup function runs. - ctx := t.Context() - go func() { - if err := mgr.Start(ctx); err != nil { - t.Errorf("Manager failed: %v", err) - } - }() - - // Wait for cache to sync - if !mgr.GetCache().WaitForCacheSync(ctx) { - t.Fatal("Cache failed to sync") - } -} - -// SetUpEnvtestManager is a convenience function that combines SetUpEnvtest, -// SetUpManager, and StartManager into a single call. 
-// -// This is the recommended way to set up integration tests: -// -// mgr := testutil.SetUpEnvtestManager(t, scheme) -// c := mgr.GetClient() -// -// // Setup your controller -// reconciler := &YourReconciler{Client: c, Scheme: scheme} -// reconciler.SetupWithManager(mgr) -// -// Note: envtest does not support garbage collection (cascading deletion via owner references) -// because it only runs kube-apiserver and etcd, not kube-controller-manager where the -// garbage collector controller runs. To test cascading deletion, use kind with -// UseExistingCluster: true, or test that owner references are set correctly instead. -// -// For more control, use the individual functions instead. -func SetUpEnvtestManager(t testing.TB, scheme *runtime.Scheme) manager.Manager { - t.Helper() - - cfg := SetUpEnvtest(t) - mgr := SetUpManager(t, cfg, scheme) - StartManager(t, mgr) - - return mgr -} diff --git a/pkg/resource-handler/controller/testutil/envtest_test.go b/pkg/resource-handler/controller/testutil/envtest_test.go deleted file mode 100644 index 4d7c54fc..00000000 --- a/pkg/resource-handler/controller/testutil/envtest_test.go +++ /dev/null @@ -1,52 +0,0 @@ -//go:build integration -// +build integration - -package testutil_test - -import ( - "testing" - - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/healthz" -) - -func TestSetupEnvTest(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - cfg := testutil.SetUpEnvtest(t) - mgr := testutil.SetUpManager(t, cfg, scheme) - - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - t.Fatalf("Failed to set up health check, %v", err) - } - if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - t.Fatalf("Failed to set up ready check, %v", err) - } - - testutil.StartManager(t, mgr) -} - -func TestSetupEnvTestWithKubeconfig(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - cfg, shutdown := testutil.SetUpEnvtestWithKubeconfig(t) - defer shutdown() - mgr := testutil.SetUpManager(t, cfg, scheme) - - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - t.Fatalf("Failed to set up health check, %v", err) - } - if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - t.Fatalf("Failed to set up ready check, %v", err) - } - - testutil.StartManager(t, mgr) -} diff --git a/pkg/resource-handler/controller/testutil/fake_client.go b/pkg/resource-handler/controller/testutil/fake_client.go deleted file mode 100644 index d965eb16..00000000 --- a/pkg/resource-handler/controller/testutil/fake_client.go +++ /dev/null @@ -1,294 +0,0 @@ -// Package testutil provides testing utilities for controller tests. -package testutil - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// FailureConfig configures when the fake client should return errors. -// Each field is a function that receives the object/key and returns an error if the operation should fail. -type FailureConfig struct { - // OnGet is called before Get operations. Return non-nil to fail the operation. - OnGet func(key client.ObjectKey) error - - // OnList is called before List operations. Return non-nil to fail the operation. - OnList func(list client.ObjectList) error - - // OnCreate is called before Create operations. Return non-nil to fail the operation. 
- OnCreate func(obj client.Object) error - - // OnUpdate is called before Update operations. Return non-nil to fail the operation. - OnUpdate func(obj client.Object) error - - // OnPatch is called before Patch operations. Return non-nil to fail the operation. - OnPatch func(obj client.Object) error - - // OnDelete is called before Delete operations. Return non-nil to fail the operation. - OnDelete func(obj client.Object) error - - // OnDeleteAllOf is called before DeleteAllOf operations. Return non-nil to fail the operation. - OnDeleteAllOf func(obj client.Object) error - - // OnStatusUpdate is called before Status().Update() operations. Return non-nil to fail the operation. - OnStatusUpdate func(obj client.Object) error - - // OnStatusPatch is called before Status().Patch() operations. Return non-nil to fail the operation. - OnStatusPatch func(obj client.Object) error -} - -// fakeClientWithFailures wraps a real fake client and injects failures based on configuration. -type fakeClientWithFailures struct { - client.Client - config *FailureConfig -} - -// NewFakeClientWithFailures creates a fake client that can be configured to fail operations. -// This is useful for testing error handling paths in controllers. -func NewFakeClientWithFailures(baseClient client.Client, config *FailureConfig) client.Client { - if config == nil { - config = &FailureConfig{} - } - return &fakeClientWithFailures{ - Client: baseClient, - config: config, - } -} - -func (c *fakeClientWithFailures) Get( - ctx context.Context, - key client.ObjectKey, - obj client.Object, - opts ...client.GetOption, -) error { - if c.config.OnGet != nil { - if err := c.config.OnGet(key); err != nil { - return err - } - } - return c.Client.Get(ctx, key, obj, opts...) -} - -func (c *fakeClientWithFailures) List( - ctx context.Context, - list client.ObjectList, - opts ...client.ListOption, -) error { - if c.config.OnList != nil { - if err := c.config.OnList(list); err != nil { - return err - } - } - return c.Client.List(ctx, list, opts...) -} - -func (c *fakeClientWithFailures) Create( - ctx context.Context, - obj client.Object, - opts ...client.CreateOption, -) error { - if c.config.OnCreate != nil { - if err := c.config.OnCreate(obj); err != nil { - return err - } - } - return c.Client.Create(ctx, obj, opts...) -} - -func (c *fakeClientWithFailures) Update( - ctx context.Context, - obj client.Object, - opts ...client.UpdateOption, -) error { - if c.config.OnUpdate != nil { - if err := c.config.OnUpdate(obj); err != nil { - return err - } - } - return c.Client.Update(ctx, obj, opts...) -} - -func (c *fakeClientWithFailures) Patch( - ctx context.Context, - obj client.Object, - patch client.Patch, - opts ...client.PatchOption, -) error { - if c.config.OnPatch != nil { - if err := c.config.OnPatch(obj); err != nil { - return err - } - } - return c.Client.Patch(ctx, obj, patch, opts...) -} - -func (c *fakeClientWithFailures) Delete( - ctx context.Context, - obj client.Object, - opts ...client.DeleteOption, -) error { - if c.config.OnDelete != nil { - if err := c.config.OnDelete(obj); err != nil { - return err - } - } - return c.Client.Delete(ctx, obj, opts...) -} - -func (c *fakeClientWithFailures) DeleteAllOf( - ctx context.Context, - obj client.Object, - opts ...client.DeleteAllOfOption, -) error { - if c.config.OnDeleteAllOf != nil { - if err := c.config.OnDeleteAllOf(obj); err != nil { - return err - } - } - return c.Client.DeleteAllOf(ctx, obj, opts...) 
-} - -func (c *fakeClientWithFailures) Status() client.StatusWriter { - return &statusWriterWithFailures{ - StatusWriter: c.Client.Status(), - config: c.config, - } -} - -type statusWriterWithFailures struct { - client.StatusWriter - config *FailureConfig -} - -func (s *statusWriterWithFailures) Update( - ctx context.Context, - obj client.Object, - opts ...client.SubResourceUpdateOption, -) error { - if s.config.OnStatusUpdate != nil { - if err := s.config.OnStatusUpdate(obj); err != nil { - return err - } - } - return s.StatusWriter.Update(ctx, obj, opts...) -} - -func (s *statusWriterWithFailures) Patch( - ctx context.Context, - obj client.Object, - patch client.Patch, - opts ...client.SubResourcePatchOption, -) error { - if s.config.OnStatusPatch != nil { - if err := s.config.OnStatusPatch(obj); err != nil { - return err - } - } - return s.StatusWriter.Patch(ctx, obj, patch, opts...) -} - -// Helper functions for common failure scenarios - -// FailOnObjectName returns an error if the object name matches. -func FailOnObjectName(name string, err error) func(client.Object) error { - return func(obj client.Object) error { - accessor, metaErr := meta.Accessor(obj) - if metaErr != nil { - panic(fmt.Sprintf("meta.Accessor failed: %v", metaErr)) - } - if accessor.GetName() == name { - return err - } - return nil - } -} - -// FailOnKeyName returns an error if the key name matches. -func FailOnKeyName(name string, err error) func(client.ObjectKey) error { - return func(key client.ObjectKey) error { - if key.Name == name { - return err - } - return nil - } -} - -// FailOnNamespacedKeyName returns an error if both the key name and namespace match. -func FailOnNamespacedKeyName(name, namespace string, err error) func(client.ObjectKey) error { - return func(key client.ObjectKey) error { - if key.Name == name && key.Namespace == namespace { - return err - } - return nil - } -} - -// FailOnNamespace returns an error if the namespace matches. -func FailOnNamespace(namespace string, err error) func(client.Object) error { - return func(obj client.Object) error { - accessor, metaErr := meta.Accessor(obj) - if metaErr != nil { - panic(fmt.Sprintf("meta.Accessor failed: %v", metaErr)) - } - if accessor.GetNamespace() == namespace { - return err - } - return nil - } -} - -// AlwaysFail returns the given error for all operations. -func AlwaysFail(err error) func(any) error { - return func(interface{}) error { - return err - } -} - -// FailKeyAfterNCalls returns an ObjectKey failure function that fails after N successful calls. -// Use for OnGet. -func FailKeyAfterNCalls(n int, err error) func(client.ObjectKey) error { - count := 0 - return func(client.ObjectKey) error { - count++ - if count > n { - return err - } - return nil - } -} - -// FailObjAfterNCalls returns an Object failure function that fails after N successful calls. -// Use for OnCreate, OnUpdate, OnDelete, OnPatch, OnDeleteAllOf, OnStatusUpdate, OnStatusPatch. -func FailObjAfterNCalls(n int, err error) func(client.Object) error { - count := 0 - return func(client.Object) error { - count++ - if count > n { - return err - } - return nil - } -} - -// FailObjListAfterNCalls returns an ObjectList failure function that fails after N successful calls. -// Use for OnList. 
-func FailObjListAfterNCalls(n int, err error) func(client.ObjectList) error { - count := 0 - return func(client.ObjectList) error { - count++ - if count > n { - return err - } - return nil - } -} - -// Common errors for testing -var ( - ErrInjected = fmt.Errorf("injected test error") - ErrNetworkTimeout = fmt.Errorf("network timeout") - ErrPermissionError = fmt.Errorf("permission denied") -) diff --git a/pkg/resource-handler/controller/testutil/fake_client_test.go b/pkg/resource-handler/controller/testutil/fake_client_test.go deleted file mode 100644 index fb7b7767..00000000 --- a/pkg/resource-handler/controller/testutil/fake_client_test.go +++ /dev/null @@ -1,726 +0,0 @@ -package testutil - -import ( - "context" - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestFakeClientWithFailures_Get(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - key client.ObjectKey - wantErr bool - }{ - "no failure - get succeeds": { - config: nil, - key: client.ObjectKey{ - Name: "test-pod", - Namespace: "default", - }, - wantErr: false, - }, - "fail on specific name": { - config: &FailureConfig{ - OnGet: FailOnKeyName("test-pod", ErrInjected), - }, - key: client.ObjectKey{ - Name: "test-pod", - Namespace: "default", - }, - wantErr: true, - }, - "no failure on different name": { - config: &FailureConfig{ - OnGet: FailOnKeyName("other-pod", ErrInjected), - }, - key: client.ObjectKey{ - Name: "test-pod", - Namespace: "default", - }, - wantErr: false, - }, - "always fail": { - config: &FailureConfig{ - OnGet: func(key client.ObjectKey) error { - return ErrInjected - }, - }, - key: client.ObjectKey{ - Name: "test-pod", - Namespace: "default", - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod). 
- Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - result := &corev1.Pod{} - err := fakeClient.Get(context.Background(), tc.key, result) - - if (err != nil) != tc.wantErr { - t.Errorf("Get() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_Create(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - tests := map[string]struct { - config *FailureConfig - obj *corev1.Pod - wantErr bool - }{ - "no failure - create succeeds": { - config: nil, - obj: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "new-pod", - Namespace: "default", - }, - }, - wantErr: false, - }, - "fail on specific object name": { - config: &FailureConfig{ - OnCreate: FailOnObjectName("new-pod", ErrPermissionError), - }, - obj: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "new-pod", - Namespace: "default", - }, - }, - wantErr: true, - }, - "no failure on different object name": { - config: &FailureConfig{ - OnCreate: FailOnObjectName("other-pod", ErrPermissionError), - }, - obj: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "new-pod", - Namespace: "default", - }, - }, - wantErr: false, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - err := fakeClient.Create(context.Background(), tc.obj) - - if (err != nil) != tc.wantErr { - t.Errorf("Create() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_Update(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - wantErr bool - }{ - "no failure - update succeeds": { - config: nil, - wantErr: false, - }, - "fail on update": { - config: &FailureConfig{ - OnUpdate: FailOnObjectName("test-pod", ErrInjected), - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod). - Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - err := fakeClient.Update(context.Background(), pod) - - if (err != nil) != tc.wantErr { - t.Errorf("Update() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_Delete(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - wantErr bool - }{ - "no failure - delete succeeds": { - config: nil, - wantErr: false, - }, - "fail on delete": { - config: &FailureConfig{ - OnDelete: FailOnObjectName("test-pod", ErrInjected), - }, - wantErr: true, - }, - "fail on namespace": { - config: &FailureConfig{ - OnDelete: FailOnNamespace("default", ErrPermissionError), - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod.DeepCopy()). 
- Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - err := fakeClient.Delete(context.Background(), pod) - - if (err != nil) != tc.wantErr { - t.Errorf("Delete() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_StatusUpdate(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - wantErr bool - }{ - "no failure - status update succeeds": { - config: nil, - wantErr: false, - }, - "fail on status update": { - config: &FailureConfig{ - OnStatusUpdate: FailOnObjectName("test-pod", ErrInjected), - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod). - WithStatusSubresource(&corev1.Pod{}). - Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - err := fakeClient.Status().Update(context.Background(), pod) - - if (err != nil) != tc.wantErr { - t.Errorf("Status().Update() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_List(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - wantErr bool - }{ - "no failure - list succeeds": { - config: nil, - wantErr: false, - }, - "fail on list": { - config: &FailureConfig{ - OnList: func(list client.ObjectList) error { - return ErrInjected - }, - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod). - Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - podList := &corev1.PodList{} - err := fakeClient.List(context.Background(), podList) - - if (err != nil) != tc.wantErr { - t.Errorf("List() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_Patch(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - wantErr bool - }{ - "no failure - patch succeeds": { - config: nil, - wantErr: false, - }, - "fail on patch": { - config: &FailureConfig{ - OnPatch: FailOnObjectName("test-pod", ErrInjected), - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod.DeepCopy()). 
- Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - patch := client.MergeFrom(pod.DeepCopy()) - err := fakeClient.Patch(context.Background(), pod, patch) - - if (err != nil) != tc.wantErr { - t.Errorf("Patch() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_DeleteAllOf(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - wantErr bool - }{ - "no failure - deleteAllOf succeeds": { - config: nil, - wantErr: false, - }, - "fail on deleteAllOf": { - config: &FailureConfig{ - OnDeleteAllOf: func(obj client.Object) error { - return ErrInjected - }, - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod). - Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - err := fakeClient.DeleteAllOf( - context.Background(), - &corev1.Pod{}, - client.InNamespace("default"), - ) - - if (err != nil) != tc.wantErr { - t.Errorf("DeleteAllOf() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestFakeClientWithFailures_StatusPatch(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - tests := map[string]struct { - config *FailureConfig - wantErr bool - }{ - "no failure - status patch succeeds": { - config: nil, - wantErr: false, - }, - "fail on status patch": { - config: &FailureConfig{ - OnStatusPatch: FailOnObjectName("test-pod", ErrInjected), - }, - wantErr: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - baseClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(pod.DeepCopy()). - WithStatusSubresource(&corev1.Pod{}). 
- Build() - - fakeClient := NewFakeClientWithFailures(baseClient, tc.config) - - patch := client.MergeFrom(pod.DeepCopy()) - err := fakeClient.Status().Patch(context.Background(), pod, patch) - - if (err != nil) != tc.wantErr { - t.Errorf("Status().Patch() error = %v, wantErr %v", err, tc.wantErr) - } - }) - } -} - -func TestHelperFunctions(t *testing.T) { - t.Parallel() - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - } - - t.Run("FailOnObjectName - matching name", func(t *testing.T) { - t.Parallel() - - fn := FailOnObjectName("test-pod", ErrInjected) - err := fn(pod) - if err != ErrInjected { - t.Errorf("Expected ErrInjected, got %v", err) - } - }) - - t.Run("FailOnObjectName - different name", func(t *testing.T) { - t.Parallel() - - fn := FailOnObjectName("other-pod", ErrInjected) - err := fn(pod) - if err != nil { - t.Errorf("Expected no error, got %v", err) - } - }) - - t.Run("FailOnKeyName - matching name", func(t *testing.T) { - t.Parallel() - - fn := FailOnKeyName("test-pod", ErrInjected) - err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) - if err != ErrInjected { - t.Errorf("Expected ErrInjected, got %v", err) - } - }) - - t.Run("FailOnKeyName - different name", func(t *testing.T) { - t.Parallel() - - fn := FailOnKeyName("other-pod", ErrInjected) - err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) - if err != nil { - t.Errorf("Expected no error, got %v", err) - } - }) - - t.Run("FailOnNamespacedKeyName - matching name and namespace", func(t *testing.T) { - t.Parallel() - - fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) - err := fn(client.ObjectKey{Name: "test-pod", Namespace: "default"}) - if err != ErrInjected { - t.Errorf("Expected ErrInjected, got %v", err) - } - }) - - t.Run("FailOnNamespacedKeyName - matching name but different namespace", func(t *testing.T) { - t.Parallel() - - fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) - err := fn(client.ObjectKey{Name: "test-pod", Namespace: "kube-system"}) - if err != nil { - t.Errorf("Expected no error, got %v", err) - } - }) - - t.Run("FailOnNamespacedKeyName - different name but matching namespace", func(t *testing.T) { - t.Parallel() - - fn := FailOnNamespacedKeyName("test-pod", "default", ErrInjected) - err := fn(client.ObjectKey{Name: "other-pod", Namespace: "default"}) - if err != nil { - t.Errorf("Expected no error, got %v", err) - } - }) - - t.Run("FailOnNamespace - matching namespace", func(t *testing.T) { - t.Parallel() - - fn := FailOnNamespace("default", ErrInjected) - err := fn(pod) - if err != ErrInjected { - t.Errorf("Expected ErrInjected, got %v", err) - } - }) - - t.Run("FailOnNamespace - different namespace", func(t *testing.T) { - t.Parallel() - - fn := FailOnNamespace("other-ns", ErrInjected) - err := fn(pod) - if err != nil { - t.Errorf("Expected no error, got %v", err) - } - }) - - t.Run("FailKeyAfterNCalls", func(t *testing.T) { - t.Parallel() - - fn := FailKeyAfterNCalls(2, ErrInjected) - - // First call - should succeed - if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != nil { - t.Errorf("Call 1: expected no error, got %v", err) - } - - // Second call - should succeed - if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != nil { - t.Errorf("Call 2: expected no error, got %v", err) - } - - // Third call - should fail - if err := fn(client.ObjectKey{Name: "test", Namespace: "default"}); err != ErrInjected { - t.Errorf("Call 3: 
expected ErrInjected, got %v", err) - } - }) - - t.Run("FailObjAfterNCalls", func(t *testing.T) { - t.Parallel() - - fn := FailObjAfterNCalls(1, ErrPermissionError) - - // First call - should succeed - if err := fn(pod); err != nil { - t.Errorf("Call 1: expected no error, got %v", err) - } - - // Second call - should fail - if err := fn(pod); err != ErrPermissionError { - t.Errorf("Call 2: expected ErrPermissionError, got %v", err) - } - }) - - t.Run("FailObjListAfterNCalls", func(t *testing.T) { - t.Parallel() - - fn := FailObjListAfterNCalls(1, ErrNetworkTimeout) - podList := &corev1.PodList{} - - // First call - should succeed - if err := fn(podList); err != nil { - t.Errorf("Call 1: expected no error, got %v", err) - } - - // Second call - should fail - if err := fn(podList); err != ErrNetworkTimeout { - t.Errorf("Call 2: expected ErrNetworkTimeout, got %v", err) - } - }) - - t.Run("AlwaysFail with object", func(t *testing.T) { - t.Parallel() - - fn := AlwaysFail(ErrInjected) - err := fn(pod) - if err != ErrInjected { - t.Errorf("Expected ErrInjected, got %v", err) - } - }) - - t.Run("AlwaysFail with key", func(t *testing.T) { - t.Parallel() - - fn := AlwaysFail(ErrNetworkTimeout) - err := fn(client.ObjectKey{Name: "test", Namespace: "default"}) - if err != ErrNetworkTimeout { - t.Errorf("Expected ErrNetworkTimeout, got %v", err) - } - }) -} - -func TestHelperFunctions_Panic(t *testing.T) { - t.Parallel() - - t.Run("FailOnObjectName - panics on nil object", func(t *testing.T) { - t.Parallel() - - defer func() { - if r := recover(); r == nil { - t.Errorf("Expected panic when meta.Accessor fails on nil") - } - }() - - fn := FailOnObjectName("test", ErrInjected) - _ = fn(nil) // Should panic - }) - - t.Run("FailOnNamespace - panics on nil object", func(t *testing.T) { - t.Parallel() - - defer func() { - if r := recover(); r == nil { - t.Errorf("Expected panic when meta.Accessor fails on nil") - } - }() - - fn := FailOnNamespace("default", ErrInjected) - _ = fn(nil) // Should panic - }) -} diff --git a/pkg/resource-handler/controller/testutil/resource_watcher.go b/pkg/resource-handler/controller/testutil/resource_watcher.go deleted file mode 100644 index d0562d32..00000000 --- a/pkg/resource-handler/controller/testutil/resource_watcher.go +++ /dev/null @@ -1,803 +0,0 @@ -package testutil - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// Predefined error types for event waiting -var ( - // ErrKeepWaiting is a sentinel error that the predicate can return to - // indicate it wants to continue waiting for more events. - ErrKeepWaiting = errors.New("continue waiting for matching event") -) - -// ErrUnwatchedKinds is returned when trying to wait for resource kinds -// that aren't being watched by the ResourceWatcher. -type ErrUnwatchedKinds struct { - Kinds []string -} - -func (e *ErrUnwatchedKinds) Error() string { - return fmt.Sprintf( - "the following kinds are not being watched by this ResourceWatcher: %v", - e.Kinds, - ) -} - -// ResourceEvent represents a Kubernetes resource event. -type ResourceEvent struct { - Type string // "ADDED", "UPDATED", "DELETED" - Kind string // "Service", "StatefulSet", "Deployment", etc. 
- Name string - Namespace string - Object client.Object // The actual object (type-assert to specific type) - Time time.Time -} - -// ResourceWatcher collects events from multiple resource types. -type ResourceWatcher struct { - t testing.TB - - // Mutex is used for subscription mechanism, as well as "events" slice for - // historical events. - mu sync.RWMutex - - timeout time.Duration // Default timeout for WaitForMatch operations - cmpOpts []cmp.Option // Default comparison options for WaitForMatch - - extraResources []client.Object - watchedKinds map[string]any // Tracks which resource kinds are being watched - events []ResourceEvent - eventCh chan ResourceEvent - - // subscribers set up with a simple slice and loop over the channels. This - // may not be the most performant for lookup, but given that there shouldn't - // be too many subscribers in action, sticking with this approach. - subscribers []chan ResourceEvent // Fan-out channels for WaitForMatch -} - -type Option func(rw *ResourceWatcher) error - -// WithExtraResource adds a watch for an additional resource type. The object -// should be a pointer reference to the struct such as a custom resource. -// -// If you need to watch multiple resources, you can provide the list of -// resources. -func WithExtraResource(objs ...client.Object) Option { - return func(rw *ResourceWatcher) error { - rw.extraResources = append(rw.extraResources, objs...) - return nil - } -} - -// WithTimeout sets the default timeout for WaitForMatch operations. -// If not set, defaults to 5 seconds. -func WithTimeout(timeout time.Duration) Option { - return func(rw *ResourceWatcher) error { - rw.timeout = timeout - return nil - } -} - -// WithCmpOpts sets the default comparison options for WaitForMatch operations. -// These options are passed to go-cmp's Diff function. -func WithCmpOpts(opts ...cmp.Option) Option { - return func(rw *ResourceWatcher) error { - rw.cmpOpts = opts - return nil - } -} - -// NewResourceWatcher creates a new ResourceWatcher and automatically watches -// Service, StatefulSet, and Deployment resources. -func NewResourceWatcher( - t testing.TB, - ctx context.Context, - mgr manager.Manager, - opts ...Option, -) *ResourceWatcher { - t.Helper() - - watcher := &ResourceWatcher{ - t: t, - timeout: 5 * time.Second, // Default timeout - cmpOpts: nil, // Default: no special comparison options - watchedKinds: make(map[string]any), // Initialize watched kinds tracker - events: []ResourceEvent{}, - eventCh: make(chan ResourceEvent, 1000), - } - for _, o := range opts { - if err := o(watcher); err != nil { - t.Fatalf("Failed to set up watcher: %v", err) - } - } - - // Start background collector - go watcher.collectEvents(ctx) - - // Automatically watch standard resources - if err := watcher.watchResource(ctx, mgr, &corev1.Service{}); err != nil { - t.Fatalf("Failed to watch Service: %v", err) - } - if err := watcher.watchResource(ctx, mgr, &appsv1.StatefulSet{}); err != nil { - t.Fatalf("Failed to watch StatefulSet: %v", err) - } - if err := watcher.watchResource(ctx, mgr, &appsv1.Deployment{}); err != nil { - t.Fatalf("Failed to watch Deployment: %v", err) - } - - // Watch extra resources provided - for _, res := range watcher.extraResources { - if err := watcher.watchResource(ctx, mgr, res); err != nil { - t.Fatalf("Failed to watch custom resource %v: %v", res, err) - } - } - - return watcher -} - -// Events returns a snapshot of all collected events at the current time. 
-func (rw *ResourceWatcher) Events() []ResourceEvent { - rw.t.Helper() - - rw.mu.RLock() - defer rw.mu.RUnlock() - return append([]ResourceEvent{}, rw.events...) -} - -// EventCh returns the channel for receiving events directly. -// Useful for custom event processing logic. -func (rw *ResourceWatcher) EventCh() <-chan ResourceEvent { - rw.t.Helper() - return rw.eventCh -} - -// SetTimeout updates the default timeout for WaitForMatch operations. -// This can be called at any time to change the timeout for subsequent calls. -func (rw *ResourceWatcher) SetTimeout(timeout time.Duration) { - rw.t.Helper() - rw.timeout = timeout -} - -// ResetTimeout resets the timeout to the default value (5 seconds). -func (rw *ResourceWatcher) ResetTimeout() { - rw.t.Helper() - rw.timeout = 5 * time.Second -} - -// SetCmpOpts updates the default comparison options for WaitForMatch operations. -// This can be called at any time to change the options for subsequent calls. -func (rw *ResourceWatcher) SetCmpOpts(opts ...cmp.Option) { - rw.t.Helper() - rw.cmpOpts = opts -} - -// ResetCmpOpts resets the comparison options to nil (no special options). -func (rw *ResourceWatcher) ResetCmpOpts() { - rw.t.Helper() - rw.cmpOpts = nil -} - -// ForKind returns events for a specific resource kind. -func (rw *ResourceWatcher) ForKind(kind string) []ResourceEvent { - rw.t.Helper() - - rw.mu.RLock() - defer rw.mu.RUnlock() - - var filtered []ResourceEvent - for _, evt := range rw.events { - if evt.Kind == kind { - filtered = append(filtered, evt) - } - } - return filtered -} - -// ForName returns events for a specific resource name (across all kinds). -func (rw *ResourceWatcher) ForName(name string) []ResourceEvent { - rw.t.Helper() - - rw.mu.RLock() - defer rw.mu.RUnlock() - - var filtered []ResourceEvent - for _, evt := range rw.events { - if evt.Name == name { - filtered = append(filtered, evt) - } - } - return filtered -} - -// Count returns the total number of events collected. -func (rw *ResourceWatcher) Count() int { - rw.t.Helper() - - rw.mu.RLock() - defer rw.mu.RUnlock() - return len(rw.events) -} - -// waitForEvent is a helper that waits for an event matching the predicate -// function. It handles the common select/timeout logic and returns the matching -// event or an error. -// -// The predicate function should return an error indicating the action to take: -// - nil: match found, stop waiting and return the event successfully -// - ErrKeepWaiting: continue waiting for more events -// - any other error: stop waiting and return that error to the caller -// -// Returns: -// - (*ResourceEvent, nil): when predicate returns nil (match found) -// - (nil, context.Canceled): when the subscription channel is closed (watcher stopped) -// - (nil, context.DeadlineExceeded): when the deadline is reached -// - (nil, error): when predicate returns an error other than ErrKeepWaiting -// -// TODO: Currently there is no use of the matched event, maybe it's someting we -// can drop. 
-func waitForEvent( - t testing.TB, - subCh chan ResourceEvent, - deadline time.Time, - predicate func(ResourceEvent) error, -) (*ResourceEvent, error) { - t.Helper() - - for { - select { - case evt, ok := <-subCh: - if !ok { - // Channel closed (context cancelled) - return nil, context.Canceled - } - - err := predicate(evt) - if err == nil { - // Match found - return &evt, nil - } - if errors.Is(err, ErrKeepWaiting) { - // Continue waiting for more events - continue - } - // Any other error, stop and return it - return nil, err - - case <-time.After(time.Until(deadline)): - if !time.Now().Before(deadline) { - // Timeout reached - return nil, context.DeadlineExceeded - } - } - } -} - -// collectEvents collects events and fans them out to subscribers. -// -// This is meant to be run in the background using goroutines. -func (rw *ResourceWatcher) collectEvents(ctx context.Context) { - rw.t.Helper() - - for { - select { - case evt := <-rw.eventCh: - rw.mu.Lock() - // Store in the events slice for cache. - rw.events = append(rw.events, evt) - - // Fan out to all subscribers. - for _, subCh := range rw.subscribers { - select { - case subCh <- evt: - // Event sent to subscriber. - default: - // When subscriber channel is full, skip. - rw.t.Logf("Warning: subscriber channel full, dropping event") - } - } - rw.mu.Unlock() - case <-ctx.Done(): - // Close all subscriber channels. - rw.mu.Lock() - for _, subCh := range rw.subscribers { - close(subCh) - } - rw.subscribers = nil - rw.mu.Unlock() - return - } - } -} - -// WaitForMatch waits for one or more resources to match the expected objects -// using go-cmp comparison. Returns nil when all matched, error on timeout. -// -// Uses the watcher's configured timeout and comparison options (set via -// SetTimeout/SetCmpOpts or during initialization with WithTimeout/WithCmpOpts). -// -// The timeout applies to the entire operation, not per resource. All resources -// share the same deadline. -// -// First checks existing events for early return, then subscribes to new events. -// Note that, once the desired state is found, this returns immediately, -// regardless of how future events change the actual state of the object. -// -// When multiple objects are provided, waits for all of them to match. -// -// Example: -// -// watcher.SetCmpOpts(testutil.CompareSpecOnly()...) -// expectedSts := &appsv1.StatefulSet{ -// Spec: appsv1.StatefulSetSpec{Replicas: ptr.To(int32(3))}, -// } -// expectedSvc := &corev1.Service{...} -// err := watcher.WaitForMatch(expectedSts, expectedSvc) -// if err != nil { -// t.Errorf("Resources never reached expected state: %v", err) -// } -func (rw *ResourceWatcher) WaitForMatch(expected ...client.Object) error { - rw.t.Helper() - - if len(expected) == 0 { - return nil - } - - // Validate all provided kinds are being watched before waiting, and if any - // is missing, return early with an error. - var unwatchedKinds []string - for _, obj := range expected { - kind := extractKind(obj) - if _, watched := rw.watchedKinds[kind]; !watched { - unwatchedKinds = append(unwatchedKinds, kind) - } - } - if len(unwatchedKinds) > 0 { - return &ErrUnwatchedKinds{Kinds: unwatchedKinds} - } - - // Calculate deadline once for all objects. - deadline := time.Now().Add(rw.timeout) - // Note that this is copied so that a long-running subscription won't refer to - // cmpOpts that may be updated while it is running. - cmpOpts := rw.cmpOpts - - // Wait for each object to match using shared deadline.
- // Note that, for simplicity, this does not run the checks concurrently. All - // events are stored in the events cache slice, so checking sequentially still - // results in a match against the latest event. One caveat: an object may be - // updated again after its match is found, and such cases are not covered. - for _, obj := range expected { - if err := rw.waitForSingleMatch(obj, deadline, cmpOpts); err != nil { - return err - } - } - - return nil -} - -// waitForSingleMatch waits for a single resource to match the expected object. -func (rw *ResourceWatcher) waitForSingleMatch( - expected client.Object, - deadline time.Time, - cmpOpts []cmp.Option, -) error { - rw.t.Helper() - - kind := extractKind(expected) - - // Step 1: Check latest state of resource in existing events. - matched, diff := rw.checkLatestEventMatches(expected, cmpOpts) - if matched { - return nil - } - if diff != "" { - suffix := "" - // When the build flag is specified, include the diff as suffix. - if showDiffs { - suffix = "\n" + diff - } - rw.t.Logf("Exists but not matching \"%s\", subscribing for updates...%s", kind, suffix) - } - - // Step 2: Subscribe to new events. - subCh := rw.subscribe() - defer rw.unsubscribe(subCh) - - // Step 3: Wait for matching event or timeout (using shared deadline). - // Initialize lastDiff with the diff from initial check (if any). - lastDiff := diff - - predicate := func(evt ResourceEvent) error { - // Only check events of the matching kind. - if evt.Kind != kind { - return ErrKeepWaiting - } - - // Compare using go-cmp. - diff := cmp.Diff(expected, evt.Object, cmpOpts...) - if diff == "" { - rw.t.Logf("Matched \"%s\" %s/%s", kind, evt.Namespace, evt.Name) - return nil - } - - // Store last diff for error reporting. - lastDiff = diff - // Log verbosity is controlled by the "verbose" build tag. - - suffix := "" - // When the build flag is specified, include the diff as suffix. - if showDiffs { - suffix = "\n" + diff - } - rw.t.Logf("Waiting for \"%s\" %s/%s%s", kind, evt.Namespace, evt.Name, suffix) - - return ErrKeepWaiting - } - - _, err := waitForEvent(rw.t, subCh, deadline, predicate) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - if lastDiff != "" { - return fmt.Errorf( - "timeout waiting for %s to match.\nLast diff (-want +got):\n%s", - kind, - lastDiff, - ) - } - return fmt.Errorf("timeout waiting for %s (no events of this kind received)", kind) - } - if errors.Is(err, context.Canceled) { - return fmt.Errorf("watcher stopped") - } - return err - } - - // Match found, return nil. - // TODO: We could return the matched object, but it is not necessary with the - // current logic. - return nil -} - -// WaitForEventType waits for an event with a specific kind and type -// (ADDED, UPDATED, DELETED). Returns the first matching event, or an error on -// timeout.
-func (rw *ResourceWatcher) WaitForEventType( - kind, eventType string, - timeout time.Duration, -) (*ResourceEvent, error) { - rw.t.Helper() - - // Step 1: Check existing events first - rw.mu.RLock() - for _, evt := range rw.events { - if evt.Kind == kind && evt.Type == eventType { - result := evt - rw.mu.RUnlock() - rw.t.Logf("Found %s \"%s\" %s/%s", eventType, kind, evt.Namespace, evt.Name) - return &result, nil - } - } - rw.mu.RUnlock() - - // Step 2: Subscribe to new events - subCh := rw.subscribe() - defer rw.unsubscribe(subCh) - - // Step 3: Wait for matching event - deadline := time.Now().Add(timeout) - - for { - select { - case evt, ok := <-subCh: - if !ok { - return nil, fmt.Errorf("watcher stopped") - } - - if evt.Kind == kind && evt.Type == eventType { - rw.t.Logf("Found %s \"%s\" %s/%s", eventType, kind, evt.Namespace, evt.Name) - return &evt, nil - } - - case <-time.After(time.Until(deadline)): - if !time.Now().Before(deadline) { - return nil, fmt.Errorf("timeout waiting for %s \"%s\" event", eventType, kind) - } - } - } -} - -// watchResource sets up an informer for a resource type. -func (rw *ResourceWatcher) watchResource( - ctx context.Context, - mgr manager.Manager, - obj client.Object, -) error { - rw.t.Helper() - - informer, err := mgr.GetCache().GetInformer(ctx, obj) - if err != nil { - return fmt.Errorf("failed to get informer: %w", err) - } - - kind := extractKind(obj) - - _, err = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj any) { - cObj := obj.(client.Object) - rw.sendEvent("ADDED", kind, cObj) - }, - UpdateFunc: func(oldObj, newObj any) { - cObj := newObj.(client.Object) - rw.sendEvent("UPDATED", kind, cObj) - }, - DeleteFunc: func(obj any) { - cObj := obj.(client.Object) - rw.sendEvent("DELETED", kind, cObj) - }, - }) - if err != nil { - return err - } - - // Track this kind as watched - rw.watchedKinds[kind] = nil - - return nil -} - -// sendEvent sends an event to the channel, falling back to dropping the event -// when the channel cannot receive it. -func (rw *ResourceWatcher) sendEvent(eventType, kind string, obj client.Object) { - rw.t.Helper() - - event := ResourceEvent{ - Type: eventType, - Kind: kind, - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - Object: obj.DeepCopyObject().(client.Object), - Time: time.Now(), - } - - select { - case rw.eventCh <- event: - rw.t.Logf( - "(%s) \"%s\" %s/%s", - strings.ToLower(eventType), - kind, - obj.GetNamespace(), - obj.GetName(), - ) - default: - rw.t.Logf("Warning: event channel full, dropping event") - } -} - -// findLatestEvent searches for an event matching the predicate function (newest first). -// The predicate is called with each event (newest to oldest) while holding the read lock. -// Returns the first matching event, or nil if none found. -func (rw *ResourceWatcher) findLatestEvent(predicate func(ResourceEvent) bool) *ResourceEvent { - rw.t.Helper() - - rw.mu.RLock() - defer rw.mu.RUnlock() - - for i := len(rw.events) - 1; i >= 0; i-- { - if predicate(rw.events[i]) { - evt := rw.events[i] - return &evt - } - } - return nil -} - -// findLatestEventFor finds the most recent event matching the given object. -// If the object has empty name and namespace, it matches by kind only. -// Returns nil if no matching event is found.
-func (rw *ResourceWatcher) findLatestEventFor(obj client.Object) *ResourceEvent { - rw.t.Helper() - - kind := extractKind(obj) - name := obj.GetName() - namespace := obj.GetNamespace() - - return rw.findLatestEvent(func(evt ResourceEvent) bool { - if evt.Kind != kind { - return false - } - if name != "" && evt.Name != name { - return false - } - if namespace != "" && evt.Namespace != namespace { - return false - } - return true - }) -} - -// checkLatestEventMatches finds the latest event for the expected object and compares it. -// Returns (matched, diff). If no event found, returns (false, ""). -func (rw *ResourceWatcher) checkLatestEventMatches( - expected client.Object, - cmpOpts []cmp.Option, -) (bool, string) { - rw.t.Helper() - - latestEvt := rw.findLatestEventFor(expected) - if latestEvt == nil { - return false, "" - } - - diff := cmp.Diff(expected, latestEvt.Object, cmpOpts...) - if diff == "" { - rw.t.Logf( - "Matched \"%s\" %s/%s (from existing events)", - latestEvt.Kind, - latestEvt.Namespace, - latestEvt.Name, - ) - return true, "" - } - - return false, diff -} - -// subscribe creates and registers a new subscriber channel for fan-out. -func (rw *ResourceWatcher) subscribe() chan ResourceEvent { - rw.t.Helper() - - // NOTE: Arbitrary buffer set - subCh := make(chan ResourceEvent, 100) - - rw.mu.Lock() - rw.subscribers = append(rw.subscribers, subCh) - rw.mu.Unlock() - - return subCh -} - -// unsubscribe removes and closes a subscriber channel. -func (rw *ResourceWatcher) unsubscribe(subCh chan ResourceEvent) { - rw.t.Helper() - - rw.mu.Lock() - for i, ch := range rw.subscribers { - if ch == subCh { - rw.subscribers = append(rw.subscribers[:i], rw.subscribers[i+1:]...) - break - } - } - rw.mu.Unlock() - - close(subCh) -} - -// extractKind extracts a clean kind name from a client.Object (internal helper). -func extractKind(obj client.Object) string { - kind := fmt.Sprintf("%T", obj) - // Remove pointer prefix - if len(kind) > 0 && kind[0] == '*' { - kind = kind[1:] - } - // Extract just the type name after the last dot - for i := len(kind) - 1; i >= 0; i-- { - if kind[i] == '.' { - return kind[i+1:] - } - } - return kind -} - -// Obj creates a client.Object with the given name and namespace. -// This is a convenience helper for deletion testing and other scenarios -// where you need to reference an object by name/namespace only. -// -// Example: -// -// watcher.WaitForDeletion(testutil.Obj[appsv1.StatefulSet]("etcd", "default")) -func Obj[T any, PT interface { - *T - client.Object -}](name, namespace string) PT { - obj := new(T) - ptr := PT(obj) - ptr.SetName(name) - ptr.SetNamespace(namespace) - return ptr -} - -// WaitForDeletion waits for one or more resources to be deleted (receive DELETED events). -// This checks that resources were fully removed from the cluster, not just -// marked for deletion with DeletionTimestamp. -// -// Uses the watcher's configured timeout. The timeout applies to the entire operation, -// not per resource. All resources share the same deadline. 
-// -// Example: -// -// // Delete parent resource -// client.Delete(ctx, etcd) -// -// // Wait for owned resources to be cascade deleted -// err := watcher.WaitForDeletion( -// testutil.Obj[appsv1.StatefulSet]("etcd", "default"), -// testutil.Obj[corev1.Service]("etcd", "default"), -// testutil.Obj[corev1.Service]("etcd-headless", "default"), -// ) -func (rw *ResourceWatcher) WaitForDeletion(objs ...client.Object) error { - rw.t.Helper() - - if len(objs) == 0 { - return nil - } - - deadline := time.Now().Add(rw.timeout) - - for _, obj := range objs { - if err := rw.waitForSingleDeletion(obj, deadline); err != nil { - return err - } - } - - return nil -} - -// waitForSingleDeletion waits for a single resource to be deleted. -func (rw *ResourceWatcher) waitForSingleDeletion(obj client.Object, deadline time.Time) error { - rw.t.Helper() - - kind := extractKind(obj) - name := obj.GetName() - namespace := obj.GetNamespace() - - // Check existing events first - if evt := rw.findLatestEvent(func(e ResourceEvent) bool { - return e.Kind == kind && e.Name == name && e.Namespace == namespace && e.Type == "DELETED" - }); evt != nil { - rw.t.Logf("Matched DELETED \"%s\" %s/%s (from existing events)", kind, namespace, name) - return nil - } - - // Subscribe to new events - subCh := rw.subscribe() - defer rw.unsubscribe(subCh) - - // Wait for DELETED event - predicate := func(evt ResourceEvent) error { - if evt.Kind != kind || evt.Name != name || evt.Namespace != namespace { - return ErrKeepWaiting - } - if evt.Type == "DELETED" { - rw.t.Logf("Matched DELETED \"%s\" %s/%s", kind, namespace, name) - return nil - } - return ErrKeepWaiting - } - - _, err := waitForEvent(rw.t, subCh, deadline, predicate) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("timeout waiting for %s %s/%s to be deleted", kind, namespace, name) - } - if errors.Is(err, context.Canceled) { - return fmt.Errorf("watcher stopped") - } - return err - } - return nil -} diff --git a/pkg/resource-handler/controller/testutil/resource_watcher_diffs.go b/pkg/resource-handler/controller/testutil/resource_watcher_diffs.go deleted file mode 100644 index 5de1d92f..00000000 --- a/pkg/resource-handler/controller/testutil/resource_watcher_diffs.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !verbose - -package testutil - -// showDiffs controls whether detailed diffs are logged when resources don't match. -// By default, only summary messages are shown. -// To enable detailed diffs, build with the "verbose" tag: go test -tags=verbose -const showDiffs = false diff --git a/pkg/resource-handler/controller/testutil/resource_watcher_diffs_verbose.go b/pkg/resource-handler/controller/testutil/resource_watcher_diffs_verbose.go deleted file mode 100644 index 545b2d26..00000000 --- a/pkg/resource-handler/controller/testutil/resource_watcher_diffs_verbose.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build verbose - -package testutil - -// showDiffs controls whether detailed diffs are logged when resources don't match. 
-// This file is compiled when building with the "verbose" tag: go test -tags=verbose -const showDiffs = true diff --git a/pkg/resource-handler/controller/testutil/resource_watcher_test.go b/pkg/resource-handler/controller/testutil/resource_watcher_test.go deleted file mode 100644 index be5c6bd1..00000000 --- a/pkg/resource-handler/controller/testutil/resource_watcher_test.go +++ /dev/null @@ -1,737 +0,0 @@ -//go:build integration -// +build integration - -package testutil_test - -import ( - "context" - "errors" - "reflect" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" -) - -// TestResourceWatcher_BeforeCreation tests that watcher can subscribe to events -// that haven't happened yet (watcher started before resource creation). -func TestResourceWatcher_BeforeCreation(t *testing.T) { - t.Parallel() - - tests := map[string]struct { - setup func(ctx context.Context, c client.Client) error - assertFunc func(t *testing.T, watcher *testutil.ResourceWatcher) - }{ - "single service created": { - setup: func(ctx context.Context, c client.Client) error { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-svc", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{{Port: 80}}, - }, - } - return c.Create(ctx, svc) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - // Expected object with Kubernetes defaults explicitly set - expected := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-svc", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Protocol: corev1.ProtocolTCP, // Kubernetes default - Port: 80, - TargetPort: intstr.FromInt(80), // Defaults to Port - }, - }, - Type: corev1.ServiceTypeClusterIP, // Kubernetes default - SessionAffinity: corev1.ServiceAffinityNone, // Kubernetes default - }, - } - - // Configure watcher with comparison options - opts := append( - testutil.IgnoreMetaRuntimeFields(), - testutil.IgnoreStatus(), - testutil.IgnoreServiceRuntimeFields(), - ) - watcher.SetCmpOpts(opts...) 
- - err := watcher.WaitForMatch(expected) - if err != nil { - t.Errorf("Failed to wait for Service: %v", err) - } - }, - }, - "statefulset created": { - setup: func(ctx context.Context, c client.Client) error { - sts := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - Namespace: "default", - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To(int32(2)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx:latest", - }, - }, - }, - }, - }, - } - return c.Create(ctx, sts) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - expected := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - Namespace: "default", - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To(int32(2)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx:latest", - }, - }, - }, - }, - }, - } - - opts := append( - testutil.IgnoreMetaRuntimeFields(), - testutil.IgnoreStatefulSetRuntimeFields(), - testutil.IgnorePodSpecDefaults(), - testutil.IgnoreStatefulSetSpecDefaults(), - ) - watcher.SetCmpOpts(opts...) - - err := watcher.WaitForMatch(expected) - if err != nil { - t.Errorf("Failed to wait for StatefulSet: %v", err) - } - }, - }, - "deployment created": { - setup: func(ctx context.Context, c client.Client) error { - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deploy", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To(int32(1)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "nginx"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "nginx"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx:1.25", - }, - }, - }, - }, - }, - } - return c.Create(ctx, deploy) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - expected := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deploy", - Namespace: "default", - }, - Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To(int32(1)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "nginx"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "nginx"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx:1.25", - ImagePullPolicy: corev1.PullIfNotPresent, // Default for non-:latest tag - }, - }, - }, - }, - }, - } - - opts := append( - testutil.IgnoreMetaRuntimeFields(), - testutil.IgnoreDeploymentRuntimeFields(), - testutil.IgnorePodSpecDefaultsExceptPullPolicy(), // Keep ImagePullPolicy for verification - testutil.IgnoreDeploymentSpecDefaults(), - ) - watcher.SetCmpOpts(opts...) 
- - err := watcher.WaitForMatch(expected) - if err != nil { - t.Errorf("Failed to wait for Deployment: %v", err) - } - }, - }, - "multiple unwatched kinds fail immediately": { - setup: func(ctx context.Context, c client.Client) error { - // No setup needed - we're testing validation before waiting - return nil - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - // Try to wait for ConfigMap and Secret which are not being watched - err := watcher.WaitForMatch(&corev1.ConfigMap{}, &corev1.Secret{}) - if err == nil { - t.Errorf("Expected error for unwatched kinds, but got nil") - return - } - - want := &testutil.ErrUnwatchedKinds{Kinds: []string{"ConfigMap", "Secret"}} - var got *testutil.ErrUnwatchedKinds - if !errors.As(err, &got) { - t.Errorf("Expected ErrUnwatchedKinds, got: %T - %v", err, err) - return - } - - if !reflect.DeepEqual(want.Kinds, got.Kinds) { - t.Errorf("Expected unwatched kinds = %v, got = %v", want.Kinds, got.Kinds) - } - }, - }, - } - - for name, tc := range tests { - name, tc := name, tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - - ctx := t.Context() - mgr := testutil.SetUpEnvtestManager(t, scheme) - - watcher := testutil.NewResourceWatcher(t, ctx, mgr) - c := mgr.GetClient() - - // Start assertion in background FIRST - done := make(chan error, 1) - go func() { - defer close(done) - tc.assertFunc(t, watcher) - }() - - // THEN create resources (tests subscription path) - if err := tc.setup(ctx, c); err != nil { - t.Fatalf("Setup failed: %v", err) - } - - // Wait for assertion to complete - <-done - }) - } -} - -// TestResourceWatcher_AfterCreation tests that watcher correctly handles resources -// that were created before the watcher started, and subsequent updates. -func TestResourceWatcher_AfterCreation(t *testing.T) { - t.Parallel() - - tests := map[string]struct { - setup func(ctx context.Context, c client.Client) error - update func(ctx context.Context, c client.Client) error - assertFunc func(t *testing.T, watcher *testutil.ResourceWatcher) - }{ - "service updated after watcher starts": { - setup: func(ctx context.Context, c client.Client) error { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-svc", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{{Port: 80}}, - }, - } - return c.Create(ctx, svc) - }, - update: func(ctx context.Context, c client.Client) error { - svc := &corev1.Service{} - if err := c.Get(ctx, client.ObjectKey{ - Name: "test-svc", - Namespace: "default", - }, svc); err != nil { - return err - } - - svc.Spec.Ports[0].Port = 8080 - return c.Update(ctx, svc) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - // Expected object with Kubernetes defaults and updated port - expected := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-svc", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Protocol: corev1.ProtocolTCP, // Kubernetes default - Port: 8080, // Updated value - TargetPort: intstr.FromInt(80), // Original TargetPort preserved - }, - }, - Type: corev1.ServiceTypeClusterIP, // Kubernetes default - SessionAffinity: corev1.ServiceAffinityNone, // Kubernetes default - }, - } - - opts := append( - testutil.IgnoreMetaRuntimeFields(), - testutil.IgnoreStatus(), - testutil.IgnoreServiceRuntimeFields(), - ) - watcher.SetCmpOpts(opts...) 
- - err := watcher.WaitForMatch(expected) - if err != nil { - t.Errorf("Failed to wait for updated Service: %v", err) - } - }, - }, - "service deleted after watcher starts": { - setup: func(ctx context.Context, c client.Client) error { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-svc-delete", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{{Port: 80}}, - }, - } - return c.Create(ctx, svc) - }, - update: func(ctx context.Context, c client.Client) error { - svc := &corev1.Service{} - if err := c.Get(ctx, client.ObjectKey{ - Name: "test-svc-delete", - Namespace: "default", - }, svc); err != nil { - return err - } - return c.Delete(ctx, svc) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - // Wait specifically for DELETED event - evt, err := watcher.WaitForEventType("Service", "DELETED", 5*time.Second) - if err != nil { - t.Fatalf("Failed to wait for Service DELETED event: %v", err) - } - - if evt.Name != "test-svc-delete" { - t.Errorf("Expected test-svc-delete, got %s", evt.Name) - } - - t.Logf("Successfully detected Service deletion") - }, - }, - } - - for name, tc := range tests { - name, tc := name, tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - - ctx := context.Background() - mgr := testutil.SetUpEnvtestManager(t, scheme) - c := mgr.GetClient() - - // Create resources FIRST - if err := tc.setup(ctx, c); err != nil { - t.Fatalf("Setup failed: %v", err) - } - - // THEN start watcher (won't see initial creation) - watcher := testutil.NewResourceWatcher(t, ctx, mgr) - - // Start assertion in background - done := make(chan error, 1) - go func() { - defer close(done) - tc.assertFunc(t, watcher) - }() - - // Trigger update (this should be picked up by watcher) - if err := tc.update(ctx, c); err != nil { - t.Fatalf("Update failed: %v", err) - } - - // Wait for assertion to complete - <-done - }) - } -} - -// TestObj tests the generic Obj helper function. 
-func TestObj(t *testing.T) { - t.Parallel() - - tests := map[string]struct { - obj client.Object - expected client.Object - }{ - "Service": { - obj: testutil.Obj[corev1.Service]("my-service", "my-namespace"), - expected: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - Namespace: "my-namespace", - }, - }, - }, - "StatefulSet": { - obj: testutil.Obj[appsv1.StatefulSet]("my-sts", "default"), - expected: &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-sts", - Namespace: "default", - }, - }, - }, - "Deployment": { - obj: testutil.Obj[appsv1.Deployment]("my-deploy", "kube-system"), - expected: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-deploy", - Namespace: "kube-system", - }, - }, - }, - "ConfigMap": { - obj: testutil.Obj[corev1.ConfigMap]("my-cm", "default"), - expected: &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-cm", - Namespace: "default", - }, - }, - }, - "Secret": { - obj: testutil.Obj[corev1.Secret]("my-secret", "default"), - expected: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-secret", - Namespace: "default", - }, - }, - }, - "Pod": { - obj: testutil.Obj[corev1.Pod]("my-pod", "default"), - expected: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-pod", - Namespace: "default", - }, - }, - }, - "DaemonSet": { - obj: testutil.Obj[appsv1.DaemonSet]("my-ds", "default"), - expected: &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-ds", - Namespace: "default", - }, - }, - }, - "ReplicaSet": { - obj: testutil.Obj[appsv1.ReplicaSet]("my-rs", "default"), - expected: &appsv1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-rs", - Namespace: "default", - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - t.Parallel() - - if diff := cmp.Diff(tc.expected, tc.obj); diff != "" { - t.Errorf("Obj mismatch (-want +got):\n%s", diff) - } - }) - } -} - -// TestWaitForDeletion tests the WaitForDeletion function. 
-func TestWaitForDeletion(t *testing.T) { - t.Parallel() - - tests := map[string]struct { - setup func(ctx context.Context, c client.Client, watcher *testutil.ResourceWatcher) error - delete func(ctx context.Context, c client.Client) error - assertFunc func(t *testing.T, watcher *testutil.ResourceWatcher) - }{ - "single service deletion": { - setup: func(ctx context.Context, c client.Client, watcher *testutil.ResourceWatcher) error { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}, - Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 80}}}, - } - if err := c.Create(ctx, svc); err != nil { - return err - } - // Wait for creation to be observed - watcher.SetCmpOpts(testutil.IgnoreMetaRuntimeFields(), testutil.IgnoreServiceRuntimeFields()) - return watcher.WaitForMatch(svc) - }, - delete: func(ctx context.Context, c client.Client) error { - return c.Delete(ctx, testutil.Obj[corev1.Service]("test-svc", "default")) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - err := watcher.WaitForDeletion(testutil.Obj[corev1.Service]("test-svc", "default")) - if err != nil { - t.Errorf("Failed to wait for deletion: %v", err) - } - }, - }, - "multiple services deletion": { - setup: func(ctx context.Context, c client.Client, watcher *testutil.ResourceWatcher) error { - svc1 := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "svc-1", Namespace: "default"}, - Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 80}}}, - } - svc2 := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "svc-2", Namespace: "default"}, - Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 80}}}, - } - if err := c.Create(ctx, svc1); err != nil { - return err - } - if err := c.Create(ctx, svc2); err != nil { - return err - } - watcher.SetCmpOpts(testutil.IgnoreMetaRuntimeFields(), testutil.IgnoreServiceRuntimeFields()) - return watcher.WaitForMatch(svc1, svc2) - }, - delete: func(ctx context.Context, c client.Client) error { - if err := c.Delete(ctx, testutil.Obj[corev1.Service]("svc-1", "default")); err != nil { - return err - } - return c.Delete(ctx, testutil.Obj[corev1.Service]("svc-2", "default")) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - err := watcher.WaitForDeletion( - testutil.Obj[corev1.Service]("svc-1", "default"), - testutil.Obj[corev1.Service]("svc-2", "default"), - ) - if err != nil { - t.Errorf("Failed to wait for multiple deletions: %v", err) - } - }, - }, - "mixed resource types deletion": { - setup: func(ctx context.Context, c client.Client, watcher *testutil.ResourceWatcher) error { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "my-svc", Namespace: "default"}, - Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 80}}}, - } - deploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "my-deploy", Namespace: "default"}, - Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To(int32(1)), - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "test"}}, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, - }, - }, - } - if err := c.Create(ctx, svc); err != nil { - return err - } - if err := c.Create(ctx, deploy); err != nil { - return err - } - watcher.SetCmpOpts( - testutil.IgnoreMetaRuntimeFields(), - testutil.IgnoreServiceRuntimeFields(), - testutil.IgnoreDeploymentRuntimeFields(), - 
testutil.IgnoreDeploymentSpecDefaults(), - testutil.IgnorePodSpecDefaults(), - ) - return watcher.WaitForMatch(svc, deploy) - }, - delete: func(ctx context.Context, c client.Client) error { - if err := c.Delete(ctx, testutil.Obj[corev1.Service]("my-svc", "default")); err != nil { - return err - } - return c.Delete(ctx, testutil.Obj[appsv1.Deployment]("my-deploy", "default")) - }, - assertFunc: func(t *testing.T, watcher *testutil.ResourceWatcher) { - err := watcher.WaitForDeletion( - testutil.Obj[corev1.Service]("my-svc", "default"), - testutil.Obj[appsv1.Deployment]("my-deploy", "default"), - ) - if err != nil { - t.Errorf("Failed to wait for mixed type deletions: %v", err) - } - }, - }, - } - - for name, tc := range tests { - name, tc := name, tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - - ctx := context.Background() - mgr := testutil.SetUpEnvtestManager(t, scheme) - c := mgr.GetClient() - watcher := testutil.NewResourceWatcher(t, ctx, mgr) - - if err := tc.setup(ctx, c, watcher); err != nil { - t.Fatalf("Setup failed: %v", err) - } - - if err := tc.delete(ctx, c); err != nil { - t.Fatalf("Delete failed: %v", err) - } - - tc.assertFunc(t, watcher) - }) - } -} - -// TestWaitForDeletion_CascadingDelete tests cascading deletion with garbage collector. -// -// This test is currently skipped because envtest does not support garbage collection. -// Envtest only runs kube-apiserver and etcd, not kube-controller-manager where the -// garbage collector controller actually runs. As a result, cascading deletion via -// owner references does not work in envtest. -// -// To test cascading deletion properly, this test should be moved to a separate -// test suite that uses kind. That will be implemented in a future PR with kind-based -// integration tests. -// -// For now, we test that owner references are set correctly (which is our controller's -// responsibility), and trust that Kubernetes GC will handle the actual deletion -// (which is Kubernetes's responsibility and is well-tested upstream). -func TestWaitForDeletion_CascadingDelete(t *testing.T) { - t.Skip("Cascading deletion not supported in envtest (requires kube-controller-manager). " + - "This test will be moved to kind-based integration tests in a future PR. 
" + - "See: https://github.com/kubernetes-sigs/controller-runtime/issues/626") - - t.Parallel() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - - ctx := context.Background() - mgr := testutil.SetUpEnvtestManager(t, scheme) - c := mgr.GetClient() - watcher := testutil.NewResourceWatcher(t, ctx, mgr) - - owner := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: "owner", Namespace: "default"}, - Data: map[string]string{"key": "value"}, - } - if err := c.Create(ctx, owner); err != nil { - t.Fatalf("Failed to create owner: %v", err) - } - - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "owned-svc", - Namespace: "default", - OwnerReferences: []metav1.OwnerReference{{ - APIVersion: "v1", - Kind: "ConfigMap", - Name: owner.Name, - UID: owner.UID, - Controller: ptr.To(true), - BlockOwnerDeletion: ptr.To(true), - }}, - }, - Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 80}}}, - } - if err := c.Create(ctx, svc); err != nil { - t.Fatalf("Failed to create owned service: %v", err) - } - - // Wait for service to be created - watcher.SetCmpOpts(testutil.IgnoreMetaRuntimeFields(), testutil.IgnoreServiceRuntimeFields()) - if err := watcher.WaitForMatch(svc); err != nil { - t.Fatalf("Failed to wait for service creation: %v", err) - } - - // Delete owner - should cascade to owned service - if err := c.Delete(ctx, owner); err != nil { - t.Fatalf("Failed to delete owner: %v", err) - } - - // Wait for cascading deletion - if err := watcher.WaitForDeletion(testutil.Obj[corev1.Service]("owned-svc", "default")); err != nil { - t.Errorf("Cascading deletion failed: %v", err) - } -} diff --git a/pkg/resource-handler/controller/etcd/container_env.go b/pkg/resource-handler/controller/toposerver/container_env.go similarity index 84% rename from pkg/resource-handler/controller/etcd/container_env.go rename to pkg/resource-handler/controller/toposerver/container_env.go index 2b439dbc..3ed4b000 100644 --- a/pkg/resource-handler/controller/etcd/container_env.go +++ b/pkg/resource-handler/controller/toposerver/container_env.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "fmt" @@ -11,7 +11,7 @@ import ( // StatefulSets. This combines pod identity, etcd config, and cluster peer // discovery details. func buildContainerEnv( - etcdName, namespace string, + toposerverName, namespace string, replicas int32, serviceName string, ) []corev1.EnvVar { @@ -21,10 +21,10 @@ func buildContainerEnv( envVars = append(envVars, buildPodIdentityEnv()...) // Add etcd configuration variables - envVars = append(envVars, buildEtcdConfigEnv(etcdName, serviceName, namespace)...) + envVars = append(envVars, buildEtcdConfigEnv(toposerverName, serviceName, namespace)...) // Add the initial cluster peer list - clusterPeerList := buildEtcdClusterPeerList(etcdName, serviceName, namespace, replicas) + clusterPeerList := buildEtcdClusterPeerList(toposerverName, serviceName, namespace, replicas) envVars = append(envVars, corev1.EnvVar{ Name: "ETCD_INITIAL_CLUSTER", Value: clusterPeerList, @@ -63,7 +63,7 @@ func buildPodIdentityEnv() []corev1.EnvVar { // These configure etcd's network endpoints and cluster formation. 
// // Ref: https://etcd.io/docs/latest/op-guide/configuration/ -func buildEtcdConfigEnv(etcdName, serviceName, namespace string) []corev1.EnvVar { +func buildEtcdConfigEnv(toposerverName, serviceName, namespace string) []corev1.EnvVar { return []corev1.EnvVar{ { Name: "ETCD_NAME", @@ -101,7 +101,7 @@ func buildEtcdConfigEnv(etcdName, serviceName, namespace string) []corev1.EnvVar }, { Name: "ETCD_INITIAL_CLUSTER_TOKEN", - Value: etcdName, + Value: toposerverName, }, } } @@ -113,14 +113,17 @@ func buildEtcdConfigEnv(etcdName, serviceName, namespace string) []corev1.EnvVar // Format: member-0=http://member-0.service.ns.svc.cluster.local:2380,... // // Ref: https://etcd.io/docs/latest/op-guide/clustering/#static -func buildEtcdClusterPeerList(etcdName, serviceName, namespace string, replicas int32) string { +func buildEtcdClusterPeerList( + toposerverName, serviceName, namespace string, + replicas int32, +) string { if replicas < 0 { return "" } peers := make([]string, 0, replicas) for i := range replicas { - podName := fmt.Sprintf("%s-%d", etcdName, i) + podName := fmt.Sprintf("%s-%d", toposerverName, i) peerURL := fmt.Sprintf("%s=http://%s.%s.%s.svc.cluster.local:2380", podName, podName, serviceName, namespace) peers = append(peers, peerURL) diff --git a/pkg/resource-handler/controller/etcd/container_env_test.go b/pkg/resource-handler/controller/toposerver/container_env_test.go similarity index 51% rename from pkg/resource-handler/controller/etcd/container_env_test.go rename to pkg/resource-handler/controller/toposerver/container_env_test.go index 0a597888..2c38ac01 100644 --- a/pkg/resource-handler/controller/etcd/container_env_test.go +++ b/pkg/resource-handler/controller/toposerver/container_env_test.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "testing" @@ -36,15 +36,15 @@ func TestBuildPodIdentityEnv(t *testing.T) { func TestBuildEtcdConfigEnv(t *testing.T) { tests := map[string]struct { - etcdName string - serviceName string - namespace string - want []corev1.EnvVar + toposerverName string + serviceName string + namespace string + want []corev1.EnvVar }{ "basic configuration": { - etcdName: "my-etcd", - serviceName: "my-etcd-headless", - namespace: "default", + toposerverName: "my-toposerver", + serviceName: "my-toposerver-headless", + namespace: "default", want: []corev1.EnvVar{ {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, @@ -52,20 +52,20 @@ func TestBuildEtcdConfigEnv(t *testing.T) { {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, { Name: "ETCD_ADVERTISE_CLIENT_URLS", - Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379", + Value: "http://$(POD_NAME).my-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2379", }, { Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", - Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380", + Value: "http://$(POD_NAME).my-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2380", }, {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, - {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-toposerver"}, }, }, "different namespace": { - etcdName: "test-etcd", - serviceName: "test-etcd-headless", - namespace: "production", + toposerverName: "test-toposerver", + serviceName: "test-toposerver-headless", + namespace: "production", want: []corev1.EnvVar{ {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, @@ -73,20 +73,20 @@ func 
TestBuildEtcdConfigEnv(t *testing.T) { {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, { Name: "ETCD_ADVERTISE_CLIENT_URLS", - Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379", + Value: "http://$(POD_NAME).test-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2379", }, { Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", - Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380", + Value: "http://$(POD_NAME).test-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2380", }, {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, - {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-toposerver"}, }, }, "long names": { - etcdName: "very-long-etcd-cluster-name", - serviceName: "very-long-etcd-cluster-name-headless", - namespace: "kube-system", + toposerverName: "very-long-toposerver-cluster-name", + serviceName: "very-long-toposerver-cluster-name-headless", + namespace: "kube-system", want: []corev1.EnvVar{ {Name: "ETCD_NAME", Value: "$(POD_NAME)"}, {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, @@ -94,21 +94,21 @@ func TestBuildEtcdConfigEnv(t *testing.T) { {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, { Name: "ETCD_ADVERTISE_CLIENT_URLS", - Value: "http://$(POD_NAME).very-long-etcd-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2379", + Value: "http://$(POD_NAME).very-long-toposerver-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2379", }, { Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", - Value: "http://$(POD_NAME).very-long-etcd-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2380", + Value: "http://$(POD_NAME).very-long-toposerver-cluster-name-headless.$(POD_NAMESPACE).svc.cluster.local:2380", }, {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, - {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "very-long-etcd-cluster-name"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "very-long-toposerver-cluster-name"}, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildEtcdConfigEnv(tc.etcdName, tc.serviceName, tc.namespace) + got := buildEtcdConfigEnv(tc.toposerverName, tc.serviceName, tc.namespace) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("buildEtcdConfigEnv() mismatch (-want +got):\n%s", diff) } @@ -118,59 +118,64 @@ func TestBuildEtcdConfigEnv(t *testing.T) { func TestBuildEtcdClusterPeerList(t *testing.T) { tests := map[string]struct { - etcdName string - serviceName string - namespace string - replicas int32 - want string + toposerverName string + serviceName string + namespace string + replicas int32 + want string }{ "single replica": { - etcdName: "my-etcd", - serviceName: "my-etcd-headless", - namespace: "default", - replicas: 1, - want: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380", + toposerverName: "my-toposerver", + serviceName: "my-toposerver-headless", + namespace: "default", + replicas: 1, + want: "my-toposerver-0=http://my-toposerver-0.my-toposerver-headless.default.svc.cluster.local:2380", }, "three replicas (typical HA)": { - etcdName: "my-etcd", - serviceName: "my-etcd-headless", - namespace: "default", - replicas: 3, - want: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-1=http://my-etcd-1.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-2=http://my-etcd-2.my-etcd-headless.default.svc.cluster.local:2380", + toposerverName: "my-toposerver", + serviceName: "my-toposerver-headless", 
+ namespace: "default", + replicas: 3, + want: "my-toposerver-0=http://my-toposerver-0.my-toposerver-headless.default.svc.cluster.local:2380,my-toposerver-1=http://my-toposerver-1.my-toposerver-headless.default.svc.cluster.local:2380,my-toposerver-2=http://my-toposerver-2.my-toposerver-headless.default.svc.cluster.local:2380", }, "five replicas": { - etcdName: "etcd-prod", - serviceName: "etcd-prod-headless", - namespace: "production", - replicas: 5, - want: "etcd-prod-0=http://etcd-prod-0.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-1=http://etcd-prod-1.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-2=http://etcd-prod-2.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-3=http://etcd-prod-3.etcd-prod-headless.production.svc.cluster.local:2380,etcd-prod-4=http://etcd-prod-4.etcd-prod-headless.production.svc.cluster.local:2380", + toposerverName: "toposerver-prod", + serviceName: "toposerver-prod-headless", + namespace: "production", + replicas: 5, + want: "toposerver-prod-0=http://toposerver-prod-0.toposerver-prod-headless.production.svc.cluster.local:2380,toposerver-prod-1=http://toposerver-prod-1.toposerver-prod-headless.production.svc.cluster.local:2380,toposerver-prod-2=http://toposerver-prod-2.toposerver-prod-headless.production.svc.cluster.local:2380,toposerver-prod-3=http://toposerver-prod-3.toposerver-prod-headless.production.svc.cluster.local:2380,toposerver-prod-4=http://toposerver-prod-4.toposerver-prod-headless.production.svc.cluster.local:2380", }, "zero replicas": { - etcdName: "my-etcd", - serviceName: "my-etcd-headless", - namespace: "default", - replicas: 0, - want: "", + toposerverName: "my-toposerver", + serviceName: "my-toposerver-headless", + namespace: "default", + replicas: 0, + want: "", }, "negative replicas": { - etcdName: "my-etcd", - serviceName: "my-etcd-headless", - namespace: "default", - replicas: -1, - want: "", + toposerverName: "my-toposerver", + serviceName: "my-toposerver-headless", + namespace: "default", + replicas: -1, + want: "", }, "different namespace": { - etcdName: "kube-etcd", - serviceName: "kube-etcd-headless", - namespace: "kube-system", - replicas: 3, - want: "kube-etcd-0=http://kube-etcd-0.kube-etcd-headless.kube-system.svc.cluster.local:2380,kube-etcd-1=http://kube-etcd-1.kube-etcd-headless.kube-system.svc.cluster.local:2380,kube-etcd-2=http://kube-etcd-2.kube-etcd-headless.kube-system.svc.cluster.local:2380", + toposerverName: "kube-toposerver", + serviceName: "kube-toposerver-headless", + namespace: "kube-system", + replicas: 3, + want: "kube-toposerver-0=http://kube-toposerver-0.kube-toposerver-headless.kube-system.svc.cluster.local:2380,kube-toposerver-1=http://kube-toposerver-1.kube-toposerver-headless.kube-system.svc.cluster.local:2380,kube-toposerver-2=http://kube-toposerver-2.kube-toposerver-headless.kube-system.svc.cluster.local:2380", }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildEtcdClusterPeerList(tc.etcdName, tc.serviceName, tc.namespace, tc.replicas) + got := buildEtcdClusterPeerList( + tc.toposerverName, + tc.serviceName, + tc.namespace, + tc.replicas, + ) if got != tc.want { t.Errorf("buildEtcdClusterPeerList() = %v, want %v", got, tc.want) } @@ -180,17 +185,17 @@ func TestBuildEtcdClusterPeerList(t *testing.T) { func TestBuildContainerEnv(t *testing.T) { tests := map[string]struct { - etcdName string - namespace string - replicas int32 - serviceName string - want []corev1.EnvVar + toposerverName string + namespace string + replicas int32 + 
serviceName string + want []corev1.EnvVar }{ "complete environment with 3 replicas": { - etcdName: "my-etcd", - namespace: "default", - replicas: 3, - serviceName: "my-etcd-headless", + toposerverName: "my-toposerver", + namespace: "default", + replicas: 3, + serviceName: "my-toposerver-headless", want: []corev1.EnvVar{ { Name: "POD_NAME", @@ -214,25 +219,25 @@ func TestBuildContainerEnv(t *testing.T) { {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, { Name: "ETCD_ADVERTISE_CLIENT_URLS", - Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379", + Value: "http://$(POD_NAME).my-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2379", }, { Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", - Value: "http://$(POD_NAME).my-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380", + Value: "http://$(POD_NAME).my-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2380", }, {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, - {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "my-toposerver"}, { Name: "ETCD_INITIAL_CLUSTER", - Value: "my-etcd-0=http://my-etcd-0.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-1=http://my-etcd-1.my-etcd-headless.default.svc.cluster.local:2380,my-etcd-2=http://my-etcd-2.my-etcd-headless.default.svc.cluster.local:2380", + Value: "my-toposerver-0=http://my-toposerver-0.my-toposerver-headless.default.svc.cluster.local:2380,my-toposerver-1=http://my-toposerver-1.my-toposerver-headless.default.svc.cluster.local:2380,my-toposerver-2=http://my-toposerver-2.my-toposerver-headless.default.svc.cluster.local:2380", }, }, }, "single replica": { - etcdName: "test-etcd", - namespace: "test", - replicas: 1, - serviceName: "test-etcd-headless", + toposerverName: "test-toposerver", + namespace: "test", + replicas: 1, + serviceName: "test-toposerver-headless", want: []corev1.EnvVar{ { Name: "POD_NAME", @@ -256,27 +261,27 @@ func TestBuildContainerEnv(t *testing.T) { {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, { Name: "ETCD_ADVERTISE_CLIENT_URLS", - Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379", + Value: "http://$(POD_NAME).test-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2379", }, { Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", - Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380", + Value: "http://$(POD_NAME).test-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2380", }, // Cluster setup won't happen in a single cluster, and these // env variables are only used at startup. 
{Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, - {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-toposerver"}, { Name: "ETCD_INITIAL_CLUSTER", - Value: "test-etcd-0=http://test-etcd-0.test-etcd-headless.test.svc.cluster.local:2380", + Value: "test-toposerver-0=http://test-toposerver-0.test-toposerver-headless.test.svc.cluster.local:2380", }, }, }, "zero replicas - no ETCD_INITIAL_CLUSTER": { - etcdName: "empty-etcd", - namespace: "default", - replicas: 0, - serviceName: "empty-etcd-headless", + toposerverName: "empty-toposerver", + namespace: "default", + replicas: 0, + serviceName: "empty-toposerver-headless", want: []corev1.EnvVar{ { Name: "POD_NAME", @@ -300,18 +305,18 @@ func TestBuildContainerEnv(t *testing.T) { {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, { Name: "ETCD_ADVERTISE_CLIENT_URLS", - Value: "http://$(POD_NAME).empty-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379", + Value: "http://$(POD_NAME).empty-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2379", }, { Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", - Value: "http://$(POD_NAME).empty-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380", + Value: "http://$(POD_NAME).empty-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2380", }, // Cluster setup won't happen in a single cluster, and these // env variables are only used at startup. In case of scaling up // from zero replica, the updated env variable will be picked up // correctly, and thus an empty variable like this will be OK. {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, - {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "empty-etcd"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "empty-toposerver"}, {Name: "ETCD_INITIAL_CLUSTER"}, }, }, @@ -319,7 +324,7 @@ func TestBuildContainerEnv(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildContainerEnv(tc.etcdName, tc.namespace, tc.replicas, tc.serviceName) + got := buildContainerEnv(tc.toposerverName, tc.namespace, tc.replicas, tc.serviceName) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("BuildContainerEnv() mismatch (-want +got):\n%s", diff) } diff --git a/pkg/resource-handler/controller/etcd/integration_test.go b/pkg/resource-handler/controller/toposerver/integration_test.go similarity index 65% rename from pkg/resource-handler/controller/etcd/integration_test.go rename to pkg/resource-handler/controller/toposerver/integration_test.go index c801e4ab..feaa336d 100644 --- a/pkg/resource-handler/controller/etcd/integration_test.go +++ b/pkg/resource-handler/controller/toposerver/integration_test.go @@ -1,13 +1,12 @@ //go:build integration // +build integration -package etcd_test +package toposerver_test import ( + "path/filepath" "testing" - multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -18,28 +17,36 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - etcdcontroller "github.com/numtide/multigres-operator/pkg/resource-handler/controller/etcd" + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" + toposervercontroller "github.com/numtide/multigres-operator/pkg/resource-handler/controller/toposerver" + "github.com/numtide/multigres-operator/pkg/testutil" ) func TestSetupWithManager(t *testing.T) 
{ + t.Parallel() + scheme := runtime.NewScheme() _ = multigresv1alpha1.AddToScheme(scheme) _ = appsv1.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) - cfg := testutil.SetUpEnvtest(t) - mgr := testutil.SetUpManager(t, cfg, scheme) - testutil.StartManager(t, mgr) + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) - if err := (&etcdcontroller.EtcdReconciler{ + if err := (&toposervercontroller.TopoServerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(mgr, controller.Options{ + SkipNameValidation: ptr.To(true), + }); err != nil { t.Fatalf("Failed to create controller, %v", err) } } -func TestEtcdReconciliation(t *testing.T) { +func TestTopoServerReconciliation(t *testing.T) { t.Parallel() scheme := runtime.NewScheme() @@ -48,37 +55,41 @@ func TestEtcdReconciliation(t *testing.T) { _ = corev1.AddToScheme(scheme) tests := map[string]struct { - etcd *multigresv1alpha1.Etcd + toposerver *multigresv1alpha1.TopoServer existingObjects []client.Object failureConfig *testutil.FailureConfig wantResources []client.Object - assertFunc func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) + wantErr bool + wantRequeue bool + assertFunc func(t *testing.T, c client.Client, toposerver *multigresv1alpha1.TopoServer) }{ - "simple etcd input": { - etcd: &multigresv1alpha1.Etcd{ + "simple toposerver input": { + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{ + RootPath: "/vitess/global", + }, }, wantResources: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", - Labels: etcdLabels(t, "test-etcd"), - OwnerReferences: etcdOwnerRefs(t, "test-etcd"), + Labels: toposerverLabels(t, "test-toposerver"), + OwnerReferences: toposerverOwnerRefs(t, "test-toposerver"), }, Spec: appsv1.StatefulSetSpec{ Replicas: ptr.To(int32(3)), - ServiceName: "test-etcd-headless", + ServiceName: "test-toposerver-headless", Selector: &metav1.LabelSelector{ - MatchLabels: etcdLabels(t, "test-etcd"), + MatchLabels: toposerverLabels(t, "test-toposerver"), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: etcdLabels(t, "test-etcd"), + Labels: toposerverLabels(t, "test-toposerver"), }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -112,11 +123,11 @@ func TestEtcdReconciliation(t *testing.T) { {Name: "ETCD_DATA_DIR", Value: "/var/lib/etcd"}, {Name: "ETCD_LISTEN_CLIENT_URLS", Value: "http://0.0.0.0:2379"}, {Name: "ETCD_LISTEN_PEER_URLS", Value: "http://0.0.0.0:2380"}, - {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, - {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).test-etcd-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, + {Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: "http://$(POD_NAME).test-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2379"}, + {Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: "http://$(POD_NAME).test-toposerver-headless.$(POD_NAMESPACE).svc.cluster.local:2380"}, {Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"}, - {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-etcd"}, - {Name: "ETCD_INITIAL_CLUSTER", Value: 
"test-etcd-0=http://test-etcd-0.test-etcd-headless.default.svc.cluster.local:2380,test-etcd-1=http://test-etcd-1.test-etcd-headless.default.svc.cluster.local:2380,test-etcd-2=http://test-etcd-2.test-etcd-headless.default.svc.cluster.local:2380"}, + {Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: "test-toposerver"}, + {Name: "ETCD_INITIAL_CLUSTER", Value: "test-toposerver-0=http://test-toposerver-0.test-toposerver-headless.default.svc.cluster.local:2380,test-toposerver-1=http://test-toposerver-1.test-toposerver-headless.default.svc.cluster.local:2380,test-toposerver-2=http://test-toposerver-2.test-toposerver-headless.default.svc.cluster.local:2380"}, }, VolumeMounts: []corev1.VolumeMount{ {Name: "data", MountPath: "/var/lib/etcd"}, @@ -146,25 +157,25 @@ func TestEtcdReconciliation(t *testing.T) { }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", - Labels: etcdLabels(t, "test-etcd"), - OwnerReferences: etcdOwnerRefs(t, "test-etcd"), + Labels: toposerverLabels(t, "test-toposerver"), + OwnerReferences: toposerverOwnerRefs(t, "test-toposerver"), }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, Ports: []corev1.ServicePort{ tcpServicePort(t, "client", 2379), }, - Selector: etcdLabels(t, "test-etcd"), + Selector: toposerverLabels(t, "test-toposerver"), }, }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-headless", + Name: "test-toposerver-headless", Namespace: "default", - Labels: etcdLabels(t, "test-etcd"), - OwnerReferences: etcdOwnerRefs(t, "test-etcd"), + Labels: toposerverLabels(t, "test-toposerver"), + OwnerReferences: toposerverOwnerRefs(t, "test-toposerver"), }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, @@ -173,28 +184,23 @@ func TestEtcdReconciliation(t *testing.T) { tcpServicePort(t, "client", 2379), tcpServicePort(t, "peer", 2380), }, - Selector: etcdLabels(t, "test-etcd"), + Selector: toposerverLabels(t, "test-toposerver"), PublishNotReadyAddresses: true, }, }, }, }, - // "another test": { - // etcd: &multigresv1alpha1.Etcd{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: "test-etcd", - // Namespace: "default", - // }, - // Spec: multigresv1alpha1.EtcdSpec{}, - // }, - // }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { t.Parallel() ctx := t.Context() - mgr := testutil.SetUpEnvtestManager(t, scheme) + mgr := testutil.SetUpEnvtestManager(t, scheme, + testutil.WithCRDPaths( + filepath.Join("../../../../", "config", "crd", "bases"), + ), + ) watcher := testutil.NewResourceWatcher(t, ctx, mgr, testutil.WithCmpOpts( @@ -204,22 +210,22 @@ func TestEtcdReconciliation(t *testing.T) { testutil.IgnorePodSpecDefaults(), testutil.IgnoreStatefulSetSpecDefaults(), ), - testutil.WithExtraResource(&multigresv1alpha1.Etcd{}), + testutil.WithExtraResource(&multigresv1alpha1.TopoServer{}), ) client := mgr.GetClient() - etcdReconciler := &etcdcontroller.EtcdReconciler{ + toposerverReconciler := &toposervercontroller.TopoServerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), } - if err := etcdReconciler.SetupWithManager(mgr, controller.Options{ + if err := toposerverReconciler.SetupWithManager(mgr, controller.Options{ // Needed for the parallel test runs SkipNameValidation: ptr.To(true), }); err != nil { t.Fatalf("Failed to create controller, %v", err) } - if err := client.Create(ctx, tc.etcd); err != nil { + if err := client.Create(ctx, tc.toposerver); err != nil { t.Fatalf("Failed to create the initial item, %v", err) } @@ -233,26 +239,25 @@ func 
TestEtcdReconciliation(t *testing.T) { // Test helpers -// etcdLabels returns standard labels for etcd resources in tests -func etcdLabels(t testing.TB, instanceName string) map[string]string { +// toposerverLabels returns standard labels for toposerver resources in tests +func toposerverLabels(t testing.TB, instanceName string) map[string]string { t.Helper() return map[string]string{ - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/instance": instanceName, "app.kubernetes.io/managed-by": "multigres-operator", "app.kubernetes.io/name": "multigres", "app.kubernetes.io/part-of": "multigres", - "multigres.com/cell": "multigres-global-topo", } } -// etcdOwnerRefs returns owner references for an Etcd resource -func etcdOwnerRefs(t testing.TB, etcdName string) []metav1.OwnerReference { +// toposerverOwnerRefs returns owner references for a TopoServer resource +func toposerverOwnerRefs(t testing.TB, toposerverName string) []metav1.OwnerReference { t.Helper() return []metav1.OwnerReference{{ APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: etcdName, + Kind: "TopoServer", + Name: toposerverName, Controller: ptr.To(true), BlockOwnerDeletion: ptr.To(true), }} diff --git a/pkg/resource-handler/controller/etcd/ports.go b/pkg/resource-handler/controller/toposerver/ports.go similarity index 61% rename from pkg/resource-handler/controller/etcd/ports.go rename to pkg/resource-handler/controller/toposerver/ports.go index 8d77e880..1ad528d7 100644 --- a/pkg/resource-handler/controller/etcd/ports.go +++ b/pkg/resource-handler/controller/toposerver/ports.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( corev1 "k8s.io/api/core/v1" @@ -16,17 +16,17 @@ const ( ) // buildContainerPorts creates the port definitions for the etcd container. -// Uses default ports since EtcdSpec doesn't have port configuration yet. -func buildContainerPorts(etcd *multigresv1alpha1.Etcd) []corev1.ContainerPort { +// Uses default ports since TopoServerSpec doesn't have port configuration yet. +func buildContainerPorts(toposerver *multigresv1alpha1.TopoServer) []corev1.ContainerPort { clientPort := ClientPort peerPort := PeerPort - // TODO: When EtcdSpec has port fields, use them: - // if etcd.Spec.ClientPort != 0 { - // clientPort = etcd.Spec.ClientPort + // TODO: When TopoServerSpec has port fields, use them: + // if toposerver.Spec.ClientPort != 0 { + // clientPort = toposerver.Spec.ClientPort // } - // if etcd.Spec.PeerPort != 0 { - // peerPort = etcd.Spec.PeerPort + // if toposerver.Spec.PeerPort != 0 { + // peerPort = toposerver.Spec.PeerPort // } return []corev1.ContainerPort{ @@ -45,16 +45,16 @@ func buildContainerPorts(etcd *multigresv1alpha1.Etcd) []corev1.ContainerPort { // buildHeadlessServicePorts creates service ports for the headless service. // Includes both client and peer ports for StatefulSet pod discovery. 
-func buildHeadlessServicePorts(etcd *multigresv1alpha1.Etcd) []corev1.ServicePort { +func buildHeadlessServicePorts(toposerver *multigresv1alpha1.TopoServer) []corev1.ServicePort { clientPort := ClientPort peerPort := PeerPort - // TODO: When EtcdSpec has port fields, use them: - // if etcd.Spec.ClientPort != 0 { - // clientPort = etcd.Spec.ClientPort + // TODO: When TopoServerSpec has port fields, use them: + // if toposerver.Spec.ClientPort != 0 { + // clientPort = toposerver.Spec.ClientPort // } - // if etcd.Spec.PeerPort != 0 { - // peerPort = etcd.Spec.PeerPort + // if toposerver.Spec.PeerPort != 0 { + // peerPort = toposerver.Spec.PeerPort // } return []corev1.ServicePort{ @@ -75,12 +75,12 @@ func buildHeadlessServicePorts(etcd *multigresv1alpha1.Etcd) []corev1.ServicePor // buildClientServicePorts creates service ports for the client service. // Only includes the client port for external access. -func buildClientServicePorts(etcd *multigresv1alpha1.Etcd) []corev1.ServicePort { +func buildClientServicePorts(toposerver *multigresv1alpha1.TopoServer) []corev1.ServicePort { clientPort := ClientPort - // TODO: When EtcdSpec has clientPort field, use it: - // if etcd.Spec.ClientPort != 0 { - // clientPort = etcd.Spec.ClientPort + // TODO: When TopoServerSpec has clientPort field, use it: + // if toposerver.Spec.ClientPort != 0 { + // clientPort = toposerver.Spec.ClientPort // } return []corev1.ServicePort{ diff --git a/pkg/resource-handler/controller/etcd/ports_test.go b/pkg/resource-handler/controller/toposerver/ports_test.go similarity index 73% rename from pkg/resource-handler/controller/etcd/ports_test.go rename to pkg/resource-handler/controller/toposerver/ports_test.go index 73475a2b..e411951f 100644 --- a/pkg/resource-handler/controller/etcd/ports_test.go +++ b/pkg/resource-handler/controller/toposerver/ports_test.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "testing" @@ -13,16 +13,16 @@ import ( func TestBuildContainerPorts(t *testing.T) { tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - want []corev1.ContainerPort + toposerver *multigresv1alpha1.TopoServer + want []corev1.ContainerPort }{ "default ports": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, want: []corev1.ContainerPort{ { @@ -41,7 +41,7 @@ func TestBuildContainerPorts(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildContainerPorts(tc.etcd) + got := buildContainerPorts(tc.toposerver) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("buildContainerPorts() mismatch (-want +got):\n%s", diff) } @@ -51,16 +51,16 @@ func TestBuildContainerPorts(t *testing.T) { func TestBuildHeadlessServicePorts(t *testing.T) { tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - want []corev1.ServicePort + toposerver *multigresv1alpha1.TopoServer + want []corev1.ServicePort }{ "default ports": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, want: []corev1.ServicePort{ { @@ -81,7 +81,7 @@ func TestBuildHeadlessServicePorts(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := 
buildHeadlessServicePorts(tc.etcd) + got := buildHeadlessServicePorts(tc.toposerver) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("buildHeadlessServicePorts() mismatch (-want +got):\n%s", diff) } @@ -91,16 +91,16 @@ func TestBuildHeadlessServicePorts(t *testing.T) { func TestBuildClientServicePorts(t *testing.T) { tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - want []corev1.ServicePort + toposerver *multigresv1alpha1.TopoServer + want []corev1.ServicePort }{ "default port": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, want: []corev1.ServicePort{ { @@ -115,7 +115,7 @@ func TestBuildClientServicePorts(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got := buildClientServicePorts(tc.etcd) + got := buildClientServicePorts(tc.toposerver) if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("buildClientServicePorts() mismatch (-want +got):\n%s", diff) } diff --git a/pkg/resource-handler/controller/etcd/service.go b/pkg/resource-handler/controller/toposerver/service.go similarity index 55% rename from pkg/resource-handler/controller/etcd/service.go rename to pkg/resource-handler/controller/toposerver/service.go index 3161b44f..d09fb504 100644 --- a/pkg/resource-handler/controller/etcd/service.go +++ b/pkg/resource-handler/controller/toposerver/service.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "fmt" @@ -12,57 +12,61 @@ import ( "github.com/numtide/multigres-operator/pkg/resource-handler/controller/metadata" ) -// BuildHeadlessService creates a headless Service for the Etcd StatefulSet. +// BuildHeadlessService creates a headless Service for the TopoServer StatefulSet. // Headless services are required for StatefulSet pod DNS records. func BuildHeadlessService( - etcd *multigresv1alpha1.Etcd, + toposerver *multigresv1alpha1.TopoServer, scheme *runtime.Scheme, ) (*corev1.Service, error) { - labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + // TODO: Support cell-local TopoServers by adding CellName field to TopoServerSpec + // For now, TopoServer is always global topology + labels := metadata.BuildStandardLabels(toposerver.Name, ComponentName) svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: etcd.Name + "-headless", - Namespace: etcd.Namespace, + Name: toposerver.Name + "-headless", + Namespace: toposerver.Namespace, Labels: labels, }, Spec: corev1.ServiceSpec{ ClusterIP: corev1.ClusterIPNone, Selector: labels, - Ports: buildHeadlessServicePorts(etcd), + Ports: buildHeadlessServicePorts(toposerver), PublishNotReadyAddresses: true, }, } - if err := ctrl.SetControllerReference(etcd, svc, scheme); err != nil { + if err := ctrl.SetControllerReference(toposerver, svc, scheme); err != nil { return nil, fmt.Errorf("failed to set controller reference: %w", err) } return svc, nil } -// BuildClientService creates a client Service for external access to Etcd. +// BuildClientService creates a client Service for external access to TopoServer. // This service load balances across all etcd members. 
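The two Service builders keep the same split as the old etcd package: the headless Service (ClusterIP None, PublishNotReadyAddresses) backs per-pod StatefulSet DNS and exposes both the client and peer ports, while the client Service is a regular ClusterIP Service exposing only the client port. A rough sketch of the resulting addresses, illustrative only; the name and namespace below come from the test fixtures elsewhere in this diff, not from new behavior:

    // Peer/bootstrap traffic uses per-pod records behind the headless Service
    // (this is the same form as the ETCD_INITIAL_CLUSTER values in the env tests).
    peerURL := "http://test-toposerver-0.test-toposerver-headless.default.svc.cluster.local:2380"
    // Client traffic can instead go through the load-balanced client Service.
    clientURL := "http://test-toposerver.default.svc.cluster.local:2379"
    _, _ = peerURL, clientURL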
func BuildClientService( - etcd *multigresv1alpha1.Etcd, + toposerver *multigresv1alpha1.TopoServer, scheme *runtime.Scheme, ) (*corev1.Service, error) { - labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) + // TODO: Support cell-local TopoServers by adding CellName field to TopoServerSpec + // For now, TopoServer is always global topology + labels := metadata.BuildStandardLabels(toposerver.Name, ComponentName) svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: etcd.Name, - Namespace: etcd.Namespace, + Name: toposerver.Name, + Namespace: toposerver.Namespace, Labels: labels, }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, Selector: labels, - Ports: buildClientServicePorts(etcd), + Ports: buildClientServicePorts(toposerver), }, } - if err := ctrl.SetControllerReference(etcd, svc, scheme); err != nil { + if err := ctrl.SetControllerReference(toposerver, svc, scheme); err != nil { return nil, fmt.Errorf("failed to set controller reference: %w", err) } diff --git a/pkg/resource-handler/controller/toposerver/service_test.go b/pkg/resource-handler/controller/toposerver/service_test.go new file mode 100644 index 00000000..02e31fc8 --- /dev/null +++ b/pkg/resource-handler/controller/toposerver/service_test.go @@ -0,0 +1,189 @@ +package toposerver + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" +) + +func TestBuildHeadlessService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + toposerver *multigresv1alpha1.TopoServer + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + toposerver: &multigresv1alpha1.TopoServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-toposerver", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.TopoServerChildSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-toposerver-headless", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "TopoServer", + Name: "test-toposerver", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: corev1.ClusterIPNone, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + { + Name: "peer", + Port: 2380, + TargetPort: intstr.FromString("peer"), + Protocol: corev1.ProtocolTCP, + }, + }, + PublishNotReadyAddresses: true, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildHeadlessService(tc.toposerver, 
tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildHeadlessService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildHeadlessService() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestBuildClientService(t *testing.T) { + scheme := runtime.NewScheme() + _ = multigresv1alpha1.AddToScheme(scheme) + + tests := map[string]struct { + toposerver *multigresv1alpha1.TopoServer + scheme *runtime.Scheme + want *corev1.Service + wantErr bool + }{ + "minimal spec": { + toposerver: &multigresv1alpha1.TopoServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-toposerver", + Namespace: "default", + UID: "test-uid", + }, + Spec: multigresv1alpha1.TopoServerChildSpec{}, + }, + scheme: scheme, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-toposerver", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "multigres.com/v1alpha1", + Kind: "TopoServer", + Name: "test-toposerver", + UID: "test-uid", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app.kubernetes.io/name": "multigres", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", + "app.kubernetes.io/part-of": "multigres", + "app.kubernetes.io/managed-by": "multigres-operator", + }, + Ports: []corev1.ServicePort{ + { + Name: "client", + Port: 2379, + TargetPort: intstr.FromString("client"), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := BuildClientService(tc.toposerver, tc.scheme) + + if (err != nil) != tc.wantErr { + t.Errorf("BuildClientService() error = %v, wantErr %v", err, tc.wantErr) + return + } + + if tc.wantErr { + return + } + + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("BuildClientService() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/resource-handler/controller/etcd/statefulset.go b/pkg/resource-handler/controller/toposerver/statefulset.go similarity index 55% rename from pkg/resource-handler/controller/etcd/statefulset.go rename to pkg/resource-handler/controller/toposerver/statefulset.go index 2e8d94f4..450bdc6d 100644 --- a/pkg/resource-handler/controller/etcd/statefulset.go +++ b/pkg/resource-handler/controller/toposerver/statefulset.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "fmt" @@ -15,8 +15,8 @@ import ( ) const ( - // ComponentName is the component label value for etcd resources - ComponentName = "etcd" + // ComponentName is the component label value for toposerver resources + ComponentName = "toposerver" // DefaultReplicas is the default number of etcd replicas DefaultReplicas int32 = 3 @@ -34,31 +34,31 @@ const ( DataMountPath = "/var/lib/etcd" ) -// BuildStatefulSet creates a StatefulSet for the Etcd cluster. -// Returns a deterministic StatefulSet based on the Etcd spec. +// BuildStatefulSet creates a StatefulSet for the TopoServer cluster. +// Returns a deterministic StatefulSet based on the TopoServer spec. 
func BuildStatefulSet( - etcd *multigresv1alpha1.Etcd, + toposerver *multigresv1alpha1.TopoServer, scheme *runtime.Scheme, ) (*appsv1.StatefulSet, error) { replicas := DefaultReplicas - // TODO: Debatable whether this defaulting makes sense. - if etcd.Spec.Replicas != nil { - replicas = *etcd.Spec.Replicas + if toposerver.Spec.Replicas != nil { + replicas = *toposerver.Spec.Replicas } image := DefaultImage - if etcd.Spec.Image != "" { - image = etcd.Spec.Image + if toposerver.Spec.Image != "" { + image = toposerver.Spec.Image } - headlessServiceName := etcd.Name + "-headless" - labels := metadata.BuildStandardLabels(etcd.Name, ComponentName, etcd.Spec.CellName) - podLabels := metadata.MergeLabels(labels, etcd.Spec.PodLabels) + headlessServiceName := toposerver.Name + "-headless" + // TODO: Support cell-local TopoServers by adding CellName field to TopoServerSpec + // For now, TopoServer is always global topology + labels := metadata.BuildStandardLabels(toposerver.Name, ComponentName) sts := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: etcd.Name, - Namespace: etcd.Namespace, + Name: toposerver.Name, + Namespace: toposerver.Namespace, Labels: labels, }, Spec: appsv1.StatefulSetSpec{ @@ -73,24 +73,21 @@ func BuildStatefulSet( }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: podLabels, - Annotations: etcd.Spec.PodAnnotations, + Labels: labels, }, Spec: corev1.PodSpec{ - ServiceAccountName: etcd.Spec.ServiceAccountName, - ImagePullSecrets: etcd.Spec.ImagePullSecrets, Containers: []corev1.Container{ { Name: "etcd", Image: image, - Resources: etcd.Spec.Resources, + Resources: toposerver.Spec.Resources, Env: buildContainerEnv( - etcd.Name, - etcd.Namespace, + toposerver.Name, + toposerver.Namespace, replicas, headlessServiceName, ), - Ports: buildContainerPorts(etcd), + Ports: buildContainerPorts(toposerver), VolumeMounts: []corev1.VolumeMount{ { Name: DataVolumeName, @@ -99,17 +96,14 @@ func BuildStatefulSet( }, }, }, - Affinity: etcd.Spec.Affinity, - Tolerations: etcd.Spec.Tolerations, - NodeSelector: etcd.Spec.NodeSelector, - TopologySpreadConstraints: etcd.Spec.TopologySpreadConstraints, + Affinity: toposerver.Spec.Affinity, }, }, - VolumeClaimTemplates: buildVolumeClaimTemplates(etcd), + VolumeClaimTemplates: buildVolumeClaimTemplates(toposerver), }, } - if err := ctrl.SetControllerReference(etcd, sts, scheme); err != nil { + if err := ctrl.SetControllerReference(toposerver, sts, scheme); err != nil { return nil, fmt.Errorf("failed to set controller reference: %w", err) } @@ -117,25 +111,24 @@ func BuildStatefulSet( } // buildVolumeClaimTemplates creates the PVC templates for etcd data storage. -// Caller decides whether to use VolumeClaimTemplate or build from simple fields. -func buildVolumeClaimTemplates(etcd *multigresv1alpha1.Etcd) []corev1.PersistentVolumeClaim { - if etcd.Spec.VolumeClaimTemplate != nil { +// TODO: Add StorageSize and StorageClassName fields to TopoServerSpec for simpler configuration +// (similar to Etcd). For now, only DataVolumeClaimTemplate is supported. 
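With the simple StorageSize/StorageClassName fields gone for now (see the TODO above), storage for a TopoServer is driven entirely by Spec.DataVolumeClaimTemplate; when its AccessModes are empty, buildVolumeClaimTemplates falls back to the default PVC template. A minimal sketch of a TopoServer that takes the explicit-template path, modeled on the "custom VolumeClaimTemplate" test case further down; the object name, size, and storage class are placeholders:

    ts := &multigresv1alpha1.TopoServer{
        ObjectMeta: metav1.ObjectMeta{Name: "global-topo", Namespace: "default"}, // placeholder name
        Spec: multigresv1alpha1.TopoServerChildSpec{
            TopoServerSpec: multigresv1alpha1.TopoServerSpec{
                DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{
                    AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
                    Resources: corev1.VolumeResourceRequirements{
                        Requests: corev1.ResourceList{
                            corev1.ResourceStorage: resource.MustParse("20Gi"), // placeholder size
                        },
                    },
                    StorageClassName: ptr.To("fast-ssd"), // placeholder class
                },
            },
        },
    }
    // buildVolumeClaimTemplates(ts) then yields a single "data" PVC template using this spec.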
+func buildVolumeClaimTemplates( + toposerver *multigresv1alpha1.TopoServer, +) []corev1.PersistentVolumeClaim { + if len(toposerver.Spec.DataVolumeClaimTemplate.AccessModes) > 0 { return []corev1.PersistentVolumeClaim{ { ObjectMeta: metav1.ObjectMeta{ Name: DataVolumeName, }, - Spec: *etcd.Spec.VolumeClaimTemplate, + Spec: toposerver.Spec.DataVolumeClaimTemplate, }, } } - storageSize := DefaultStorageSize - if etcd.Spec.StorageSize != "" { - storageSize = etcd.Spec.StorageSize - } - + // Use default storage if not specified return []corev1.PersistentVolumeClaim{ - storage.BuildPVCTemplate(DataVolumeName, etcd.Spec.StorageClassName, storageSize), + storage.BuildPVCTemplate(DataVolumeName, nil, DefaultStorageSize), } } diff --git a/pkg/resource-handler/controller/etcd/statefulset_test.go b/pkg/resource-handler/controller/toposerver/statefulset_test.go similarity index 54% rename from pkg/resource-handler/controller/etcd/statefulset_test.go rename to pkg/resource-handler/controller/toposerver/statefulset_test.go index 3eb3f0d1..ddf7a02f 100644 --- a/pkg/resource-handler/controller/etcd/statefulset_test.go +++ b/pkg/resource-handler/controller/toposerver/statefulset_test.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "testing" @@ -9,76 +9,63 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" ) -func int32Ptr(i int32) *int32 { - return &i -} - -func boolPtr(b bool) *bool { - return &b -} - -func stringPtr(s string) *string { - return &s -} - func TestBuildStatefulSet(t *testing.T) { scheme := runtime.NewScheme() _ = multigresv1alpha1.AddToScheme(scheme) tests := map[string]struct { - etcd *multigresv1alpha1.Etcd - scheme *runtime.Scheme - want *appsv1.StatefulSet - wantErr bool + toposerver *multigresv1alpha1.TopoServer + scheme *runtime.Scheme + want *appsv1.StatefulSet + wantErr bool }{ "minimal spec - all defaults": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", UID: "test-uid", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, scheme: scheme, want: &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", Labels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, OwnerReferences: []metav1.OwnerReference{ { APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "test-etcd", + Kind: "TopoServer", + Name: "test-toposerver", UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, Spec: appsv1.StatefulSetSpec{ - ServiceName: "test-etcd-headless", - Replicas: int32Ptr(3), + ServiceName: "test-toposerver-headless", + Replicas: ptr.To(int32(3)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", + 
"app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, }, PodManagementPolicy: appsv1.ParallelPodManagement, @@ -89,11 +76,10 @@ func TestBuildStatefulSet(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, }, Spec: corev1.PodSpec{ @@ -103,10 +89,10 @@ func TestBuildStatefulSet(t *testing.T) { Image: DefaultImage, Resources: corev1.ResourceRequirements{}, Env: buildContainerEnv( - "test-etcd", + "test-toposerver", "default", 3, - "test-etcd-headless", + "test-toposerver-headless", ), Ports: buildContainerPorts(nil), // Default VolumeMounts: []corev1.VolumeMount{ @@ -142,52 +128,52 @@ func TestBuildStatefulSet(t *testing.T) { }, }, "custom replicas and image": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "etcd-custom", + Name: "toposerver-custom", Namespace: "test", UID: "custom-uid", }, - Spec: multigresv1alpha1.EtcdSpec{ - Replicas: int32Ptr(5), - Image: "quay.io/coreos/etcd:v3.5.15", + Spec: multigresv1alpha1.TopoServerChildSpec{ + TopoServerSpec: multigresv1alpha1.TopoServerSpec{ + Replicas: ptr.To(int32(5)), + Image: "quay.io/coreos/etcd:v3.5.15", + }, }, }, scheme: scheme, want: &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "etcd-custom", + Name: "toposerver-custom", Namespace: "test", Labels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "etcd-custom", - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/instance": "toposerver-custom", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, OwnerReferences: []metav1.OwnerReference{ { APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "etcd-custom", + Kind: "TopoServer", + Name: "toposerver-custom", UID: "custom-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, Spec: appsv1.StatefulSetSpec{ - ServiceName: "etcd-custom-headless", - Replicas: int32Ptr(5), + ServiceName: "toposerver-custom-headless", + Replicas: ptr.To(int32(5)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "etcd-custom", - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/instance": "toposerver-custom", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, }, PodManagementPolicy: appsv1.ParallelPodManagement, @@ -198,11 +184,10 @@ func TestBuildStatefulSet(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "etcd-custom", - "app.kubernetes.io/component": "etcd", + 
"app.kubernetes.io/instance": "toposerver-custom", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, }, Spec: corev1.PodSpec{ @@ -212,10 +197,10 @@ func TestBuildStatefulSet(t *testing.T) { Image: "quay.io/coreos/etcd:v3.5.15", Resources: corev1.ResourceRequirements{}, Env: buildContainerEnv( - "etcd-custom", + "toposerver-custom", "test", 5, - "etcd-custom-headless", + "toposerver-custom-headless", ), Ports: buildContainerPorts(nil), VolumeMounts: []corev1.VolumeMount{ @@ -250,168 +235,62 @@ func TestBuildStatefulSet(t *testing.T) { }, }, }, - "custom storage size": { - etcd: &multigresv1alpha1.Etcd{ + "custom VolumeClaimTemplate": { + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", UID: "test-uid", }, - Spec: multigresv1alpha1.EtcdSpec{ - StorageSize: "20Gi", - }, - }, - scheme: scheme, - want: &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "test-etcd", - UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), - }, - }, - }, - Spec: appsv1.StatefulSetSpec{ - ServiceName: "test-etcd-headless", - Replicas: int32Ptr(3), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - PodManagementPolicy: appsv1.ParallelPodManagement, - UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ - Type: appsv1.RollingUpdateStatefulSetStrategyType, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", - "app.kubernetes.io/part-of": "multigres", - "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "etcd", - Image: DefaultImage, - Resources: corev1.ResourceRequirements{}, - Env: buildContainerEnv( - "test-etcd", - "default", - 3, - "test-etcd-headless", - ), - Ports: buildContainerPorts(nil), - VolumeMounts: []corev1.VolumeMount{ - { - Name: DataVolumeName, - MountPath: DataMountPath, - }, - }, - }, + Spec: multigresv1alpha1.TopoServerChildSpec{ + TopoServerSpec: multigresv1alpha1.TopoServerSpec{ + DataVolumeClaimTemplate: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, }, - }, - }, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: DataVolumeName, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, + 
Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("50Gi"), }, - Resources: corev1.VolumeResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("20Gi"), - }, - }, - }, - }, - }, - }, - }, - }, - "custom VolumeClaimTemplate": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", - Namespace: "default", - UID: "test-uid", - }, - Spec: multigresv1alpha1.EtcdSpec{ - VolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteMany, - }, - Resources: corev1.VolumeResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("50Gi"), }, + StorageClassName: ptr.To("fast-ssd"), }, - StorageClassName: stringPtr("fast-ssd"), }, }, }, scheme: scheme, want: &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", Labels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, OwnerReferences: []metav1.OwnerReference{ { APIVersion: "multigres.com/v1alpha1", - Kind: "Etcd", - Name: "test-etcd", + Kind: "TopoServer", + Name: "test-toposerver", UID: "test-uid", - Controller: boolPtr(true), - BlockOwnerDeletion: boolPtr(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, Spec: appsv1.StatefulSetSpec{ - ServiceName: "test-etcd-headless", - Replicas: int32Ptr(3), + ServiceName: "test-toposerver-headless", + Replicas: ptr.To(int32(3)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, }, PodManagementPolicy: appsv1.ParallelPodManagement, @@ -422,11 +301,10 @@ func TestBuildStatefulSet(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "app.kubernetes.io/name": "multigres", - "app.kubernetes.io/instance": "test-etcd", - "app.kubernetes.io/component": "etcd", + "app.kubernetes.io/instance": "test-toposerver", + "app.kubernetes.io/component": "toposerver", "app.kubernetes.io/part-of": "multigres", "app.kubernetes.io/managed-by": "multigres-operator", - "multigres.com/cell": "multigres-global-topo", }, }, Spec: corev1.PodSpec{ @@ -436,10 +314,10 @@ func TestBuildStatefulSet(t *testing.T) { Image: DefaultImage, Resources: corev1.ResourceRequirements{}, Env: buildContainerEnv( - "test-etcd", + "test-toposerver", "default", 3, - "test-etcd-headless", + "test-toposerver-headless", ), Ports: buildContainerPorts(nil), VolumeMounts: []corev1.VolumeMount{ @@ -466,7 +344,7 @@ func TestBuildStatefulSet(t *testing.T) { corev1.ResourceStorage: resource.MustParse("50Gi"), }, }, - StorageClassName: stringPtr("fast-ssd"), + StorageClassName: ptr.To("fast-ssd"), }, }, }, @@ -474,12 +352,12 @@ func TestBuildStatefulSet(t *testing.T) { }, }, "scheme with 
incorrect type - should error": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, scheme: runtime.NewScheme(), // empty scheme with incorrect type wantErr: true, @@ -488,7 +366,7 @@ func TestBuildStatefulSet(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - got, err := BuildStatefulSet(tc.etcd, tc.scheme) + got, err := BuildStatefulSet(tc.toposerver, tc.scheme) if (err != nil) != tc.wantErr { t.Errorf("BuildStatefulSet() error = %v, wantErr %v", err, tc.wantErr) diff --git a/pkg/resource-handler/controller/etcd/etcd_controller.go b/pkg/resource-handler/controller/toposerver/toposerver_controller.go similarity index 61% rename from pkg/resource-handler/controller/etcd/etcd_controller.go rename to pkg/resource-handler/controller/toposerver/toposerver_controller.go index e9bd820b..eaa6f3a6 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller.go +++ b/pkg/resource-handler/controller/toposerver/toposerver_controller.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "context" @@ -19,70 +19,67 @@ import ( ) const ( - finalizerName = "etcd.multigres.com/finalizer" + finalizerName = "toposerver.multigres.com/finalizer" ) -// EtcdReconciler reconciles an Etcd object. -type EtcdReconciler struct { +// TopoServerReconciler reconciles a TopoServer object. +type TopoServerReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=multigres.com,resources=etcds,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=multigres.com,resources=etcds/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=multigres.com,resources=etcds/finalizers,verbs=update -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete - -// Reconcile handles Etcd resource reconciliation. -func (r *EtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// Reconcile handles TopoServer resource reconciliation. 
+func (r *TopoServerReconciler) Reconcile( + ctx context.Context, + req ctrl.Request, +) (ctrl.Result, error) { logger := log.FromContext(ctx) - // Fetch the Etcd instance - etcd := &multigresv1alpha1.Etcd{} - if err := r.Get(ctx, req.NamespacedName, etcd); err != nil { + // Fetch the TopoServer instance + toposerver := &multigresv1alpha1.TopoServer{} + if err := r.Get(ctx, req.NamespacedName, toposerver); err != nil { if errors.IsNotFound(err) { - logger.Info("Etcd resource not found, ignoring") + logger.Info("TopoServer resource not found, ignoring") return ctrl.Result{}, nil } - logger.Error(err, "Failed to get Etcd") + logger.Error(err, "Failed to get TopoServer") return ctrl.Result{}, err } // Handle deletion - if !etcd.DeletionTimestamp.IsZero() { - return r.handleDeletion(ctx, etcd) + if !toposerver.DeletionTimestamp.IsZero() { + return r.handleDeletion(ctx, toposerver) } // Add finalizer if not present - if !slices.Contains(etcd.Finalizers, finalizerName) { - etcd.Finalizers = append(etcd.Finalizers, finalizerName) - if err := r.Update(ctx, etcd); err != nil { + if !slices.Contains(toposerver.Finalizers, finalizerName) { + toposerver.Finalizers = append(toposerver.Finalizers, finalizerName) + if err := r.Update(ctx, toposerver); err != nil { logger.Error(err, "Failed to add finalizer") return ctrl.Result{}, err } } // Reconcile StatefulSet - if err := r.reconcileStatefulSet(ctx, etcd); err != nil { + if err := r.reconcileStatefulSet(ctx, toposerver); err != nil { logger.Error(err, "Failed to reconcile StatefulSet") return ctrl.Result{}, err } // Reconcile headless Service - if err := r.reconcileHeadlessService(ctx, etcd); err != nil { + if err := r.reconcileHeadlessService(ctx, toposerver); err != nil { logger.Error(err, "Failed to reconcile headless Service") return ctrl.Result{}, err } // Reconcile client Service - if err := r.reconcileClientService(ctx, etcd); err != nil { + if err := r.reconcileClientService(ctx, toposerver); err != nil { logger.Error(err, "Failed to reconcile client Service") return ctrl.Result{}, err } // Update status - if err := r.updateStatus(ctx, etcd); err != nil { + if err := r.updateStatus(ctx, toposerver); err != nil { logger.Error(err, "Failed to update status") return ctrl.Result{}, err } @@ -90,42 +87,22 @@ func (r *EtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. return ctrl.Result{}, nil } -// handleDeletion handles cleanup when Etcd is being deleted. -func (r *EtcdReconciler) handleDeletion( - ctx context.Context, - etcd *multigresv1alpha1.Etcd, -) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - if slices.Contains(etcd.Finalizers, finalizerName) { - // Perform cleanup if needed - // Currently no special cleanup required - owner references handle resource deletion - - // Remove finalizer - etcd.Finalizers = slices.DeleteFunc(etcd.Finalizers, func(s string) bool { - return s == finalizerName - }) - if err := r.Update(ctx, etcd); err != nil { - logger.Error(err, "Failed to remove finalizer") - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, nil -} - -// reconcileStatefulSet creates or updates the StatefulSet for Etcd. -func (r *EtcdReconciler) reconcileStatefulSet( +// reconcileStatefulSet creates or updates the StatefulSet for TopoServer. 
+func (r *TopoServerReconciler) reconcileStatefulSet( ctx context.Context, - etcd *multigresv1alpha1.Etcd, + toposerver *multigresv1alpha1.TopoServer, ) error { - desired, err := BuildStatefulSet(etcd, r.Scheme) + desired, err := BuildStatefulSet(toposerver, r.Scheme) if err != nil { return fmt.Errorf("failed to build StatefulSet: %w", err) } existing := &appsv1.StatefulSet{} - err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, existing) + err = r.Get( + ctx, + client.ObjectKey{Namespace: toposerver.Namespace, Name: toposerver.Name}, + existing, + ) if err != nil { if errors.IsNotFound(err) { // Create new StatefulSet @@ -147,12 +124,12 @@ func (r *EtcdReconciler) reconcileStatefulSet( return nil } -// reconcileHeadlessService creates or updates the headless Service for Etcd. -func (r *EtcdReconciler) reconcileHeadlessService( +// reconcileHeadlessService creates or updates the headless Service for TopoServer. +func (r *TopoServerReconciler) reconcileHeadlessService( ctx context.Context, - etcd *multigresv1alpha1.Etcd, + toposerver *multigresv1alpha1.TopoServer, ) error { - desired, err := BuildHeadlessService(etcd, r.Scheme) + desired, err := BuildHeadlessService(toposerver, r.Scheme) if err != nil { return fmt.Errorf("failed to build headless Service: %w", err) } @@ -160,7 +137,7 @@ func (r *EtcdReconciler) reconcileHeadlessService( existing := &corev1.Service{} err = r.Get( ctx, - client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name + "-headless"}, + client.ObjectKey{Namespace: toposerver.Namespace, Name: toposerver.Name + "-headless"}, existing, ) if err != nil { @@ -185,18 +162,22 @@ func (r *EtcdReconciler) reconcileHeadlessService( return nil } -// reconcileClientService creates or updates the client Service for Etcd. -func (r *EtcdReconciler) reconcileClientService( +// reconcileClientService creates or updates the client Service for TopoServer. +func (r *TopoServerReconciler) reconcileClientService( ctx context.Context, - etcd *multigresv1alpha1.Etcd, + toposerver *multigresv1alpha1.TopoServer, ) error { - desired, err := BuildClientService(etcd, r.Scheme) + desired, err := BuildClientService(toposerver, r.Scheme) if err != nil { return fmt.Errorf("failed to build client Service: %w", err) } existing := &corev1.Service{} - err = r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, existing) + err = r.Get( + ctx, + client.ObjectKey{Namespace: toposerver.Namespace, Name: toposerver.Name}, + existing, + ) if err != nil { if errors.IsNotFound(err) { // Create new Service @@ -219,11 +200,14 @@ func (r *EtcdReconciler) reconcileClientService( return nil } -// updateStatus updates the Etcd status based on observed state. -func (r *EtcdReconciler) updateStatus(ctx context.Context, etcd *multigresv1alpha1.Etcd) error { +// updateStatus updates the TopoServer status based on observed state. 
+func (r *TopoServerReconciler) updateStatus( + ctx context.Context, + toposerver *multigresv1alpha1.TopoServer, +) error { // Get the StatefulSet to check status sts := &appsv1.StatefulSet{} - err := r.Get(ctx, client.ObjectKey{Namespace: etcd.Namespace, Name: etcd.Name}, sts) + err := r.Get(ctx, client.ObjectKey{Namespace: toposerver.Namespace, Name: toposerver.Name}, sts) if err != nil { if errors.IsNotFound(err) { // StatefulSet not created yet @@ -233,15 +217,18 @@ func (r *EtcdReconciler) updateStatus(ctx context.Context, etcd *multigresv1alph } // Update status fields - etcd.Status.Replicas = sts.Status.Replicas - etcd.Status.ReadyReplicas = sts.Status.ReadyReplicas - etcd.Status.Ready = sts.Status.ReadyReplicas == sts.Status.Replicas && sts.Status.Replicas > 0 - etcd.Status.ObservedGeneration = etcd.Generation + toposerver.Status.Replicas = sts.Status.Replicas + toposerver.Status.ReadyReplicas = sts.Status.ReadyReplicas + toposerver.Status.ObservedGeneration = toposerver.Generation + + // Set service names + toposerver.Status.ClientServiceName = toposerver.Name + toposerver.Status.PeerServiceName = toposerver.Name + "-headless" // Update conditions - etcd.Status.Conditions = r.buildConditions(etcd, sts) + toposerver.Status.Conditions = r.buildConditions(toposerver, sts) - if err := r.Status().Update(ctx, etcd); err != nil { + if err := r.Status().Update(ctx, toposerver); err != nil { return fmt.Errorf("failed to update status: %w", err) } @@ -249,8 +236,8 @@ func (r *EtcdReconciler) updateStatus(ctx context.Context, etcd *multigresv1alph } // buildConditions creates status conditions based on observed state. -func (r *EtcdReconciler) buildConditions( - etcd *multigresv1alpha1.Etcd, +func (r *TopoServerReconciler) buildConditions( + toposerver *multigresv1alpha1.TopoServer, sts *appsv1.StatefulSet, ) []metav1.Condition { conditions := []metav1.Condition{} @@ -258,7 +245,7 @@ func (r *EtcdReconciler) buildConditions( // Ready condition readyCondition := metav1.Condition{ Type: "Ready", - ObservedGeneration: etcd.Generation, + ObservedGeneration: toposerver.Generation, LastTransitionTime: metav1.Now(), } @@ -276,15 +263,42 @@ func (r *EtcdReconciler) buildConditions( return conditions } +// handleDeletion handles cleanup when TopoServer is being deleted. +func (r *TopoServerReconciler) handleDeletion( + ctx context.Context, + toposerver *multigresv1alpha1.TopoServer, +) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + if slices.Contains(toposerver.Finalizers, finalizerName) { + // Perform cleanup if needed + // Currently no special cleanup required - owner references handle resource deletion + + // Remove finalizer + toposerver.Finalizers = slices.DeleteFunc(toposerver.Finalizers, func(s string) bool { + return s == finalizerName + }) + if err := r.Update(ctx, toposerver); err != nil { + logger.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + // SetupWithManager sets up the controller with the Manager. -func (r *EtcdReconciler) SetupWithManager(mgr ctrl.Manager, opts ...controller.Options) error { +func (r *TopoServerReconciler) SetupWithManager( + mgr ctrl.Manager, + opts ...controller.Options, +) error { controllerOpts := controller.Options{} if len(opts) > 0 { controllerOpts = opts[0] } return ctrl.NewControllerManagedBy(mgr). - For(&multigresv1alpha1.Etcd{}). + For(&multigresv1alpha1.TopoServer{}). Owns(&appsv1.StatefulSet{}). Owns(&corev1.Service{}). WithOptions(controllerOpts). 
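Because SetupWithManager takes variadic controller.Options, production wiring can stay option-free while the parallel envtest suites opt into SkipNameValidation. A hedged sketch of both call sites; the surrounding manager setup (e.g. in main.go) is assumed and not part of this diff:

    r := &toposerver.TopoServerReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }
    // Normal wiring: no options needed.
    if err := r.SetupWithManager(mgr); err != nil {
        return fmt.Errorf("unable to set up TopoServer controller: %w", err)
    }
    // Parallel envtest runs register the same controller name repeatedly,
    // hence SkipNameValidation in the integration tests:
    //   _ = r.SetupWithManager(mgr, controller.Options{SkipNameValidation: ptr.To(true)})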
diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go b/pkg/resource-handler/controller/toposerver/toposerver_controller_internal_test.go similarity index 71% rename from pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go rename to pkg/resource-handler/controller/toposerver/toposerver_controller_internal_test.go index 3e81a905..5a7b0946 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_internal_test.go +++ b/pkg/resource-handler/controller/toposerver/toposerver_controller_internal_test.go @@ -1,4 +1,4 @@ -package etcd +package toposerver import ( "context" @@ -11,34 +11,34 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" + "github.com/numtide/multigres-operator/pkg/testutil" ) // TestReconcileStatefulSet_InvalidScheme tests the error path when BuildStatefulSet fails. // This should never happen in production - scheme is properly set up in main.go. // Test exists for coverage of defensive error handling. func TestReconcileStatefulSet_InvalidScheme(t *testing.T) { - // Empty scheme without Etcd type registered + // Empty scheme without TopoServer type registered invalidScheme := runtime.NewScheme() - etcd := &multigresv1alpha1.Etcd{ + toposerver := &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, } fakeClient := fake.NewClientBuilder(). WithScheme(invalidScheme). Build() - reconciler := &EtcdReconciler{ + reconciler := &TopoServerReconciler{ Client: fakeClient, Scheme: invalidScheme, } - err := reconciler.reconcileStatefulSet(context.Background(), etcd) + err := reconciler.reconcileStatefulSet(context.Background(), toposerver) if err == nil { t.Error("reconcileStatefulSet() should error with invalid scheme") } @@ -48,24 +48,24 @@ func TestReconcileStatefulSet_InvalidScheme(t *testing.T) { func TestReconcileHeadlessService_InvalidScheme(t *testing.T) { invalidScheme := runtime.NewScheme() - etcd := &multigresv1alpha1.Etcd{ + toposerver := &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, } fakeClient := fake.NewClientBuilder(). WithScheme(invalidScheme). Build() - reconciler := &EtcdReconciler{ + reconciler := &TopoServerReconciler{ Client: fakeClient, Scheme: invalidScheme, } - err := reconciler.reconcileHeadlessService(context.Background(), etcd) + err := reconciler.reconcileHeadlessService(context.Background(), toposerver) if err == nil { t.Error("reconcileHeadlessService() should error with invalid scheme") } @@ -75,24 +75,24 @@ func TestReconcileHeadlessService_InvalidScheme(t *testing.T) { func TestReconcileClientService_InvalidScheme(t *testing.T) { invalidScheme := runtime.NewScheme() - etcd := &multigresv1alpha1.Etcd{ + toposerver := &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, } fakeClient := fake.NewClientBuilder(). WithScheme(invalidScheme). 
Build() - reconciler := &EtcdReconciler{ + reconciler := &TopoServerReconciler{ Client: fakeClient, Scheme: invalidScheme, } - err := reconciler.reconcileClientService(context.Background(), etcd) + err := reconciler.reconcileClientService(context.Background(), toposerver) if err == nil { t.Error("reconcileClientService() should error with invalid scheme") } @@ -104,27 +104,27 @@ func TestUpdateStatus_StatefulSetNotFound(t *testing.T) { _ = multigresv1alpha1.AddToScheme(scheme) _ = appsv1.AddToScheme(scheme) // Need StatefulSet type registered for Get to work - etcd := &multigresv1alpha1.Etcd{ + toposerver := &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, } fakeClient := fake.NewClientBuilder(). WithScheme(scheme). - WithObjects(etcd). - WithStatusSubresource(&multigresv1alpha1.Etcd{}). + WithObjects(toposerver). + WithStatusSubresource(&multigresv1alpha1.TopoServer{}). Build() - reconciler := &EtcdReconciler{ + reconciler := &TopoServerReconciler{ Client: fakeClient, Scheme: scheme, } // Call updateStatus when StatefulSet doesn't exist yet - err := reconciler.updateStatus(context.Background(), etcd) + err := reconciler.updateStatus(context.Background(), toposerver) if err != nil { t.Errorf("updateStatus() should not error when StatefulSet not found, got: %v", err) } @@ -135,26 +135,26 @@ func TestHandleDeletion_NoFinalizer(t *testing.T) { scheme := runtime.NewScheme() _ = multigresv1alpha1.AddToScheme(scheme) - etcd := &multigresv1alpha1.Etcd{ + toposerver := &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", Finalizers: []string{}, // No finalizer }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, } fakeClient := fake.NewClientBuilder(). WithScheme(scheme). - WithObjects(etcd). + WithObjects(toposerver). Build() - reconciler := &EtcdReconciler{ + reconciler := &TopoServerReconciler{ Client: fakeClient, Scheme: scheme, } - result, err := reconciler.handleDeletion(context.Background(), etcd) + result, err := reconciler.handleDeletion(context.Background(), toposerver) if err != nil { t.Errorf("handleDeletion() should not error when no finalizer, got: %v", err) } @@ -170,30 +170,30 @@ func TestReconcileClientService_GetError(t *testing.T) { _ = appsv1.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) - etcd := &multigresv1alpha1.Etcd{ + toposerver := &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, } // Create client with failure injection baseClient := fake.NewClientBuilder(). WithScheme(scheme). - WithObjects(etcd). + WithObjects(toposerver). 
Build() fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ - OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + OnGet: testutil.FailOnKeyName("test-toposerver", testutil.ErrNetworkTimeout), }) - reconciler := &EtcdReconciler{ + reconciler := &TopoServerReconciler{ Client: fakeClient, Scheme: scheme, } - err := reconciler.reconcileClientService(context.Background(), etcd) + err := reconciler.reconcileClientService(context.Background(), toposerver) if err == nil { t.Error("reconcileClientService() should error on Get failure") } @@ -205,30 +205,30 @@ func TestUpdateStatus_GetError(t *testing.T) { _ = multigresv1alpha1.AddToScheme(scheme) _ = appsv1.AddToScheme(scheme) - etcd := &multigresv1alpha1.Etcd{ + toposerver := &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, } baseClient := fake.NewClientBuilder(). WithScheme(scheme). - WithObjects(etcd). - WithStatusSubresource(&multigresv1alpha1.Etcd{}). + WithObjects(toposerver). + WithStatusSubresource(&multigresv1alpha1.TopoServer{}). Build() fakeClient := testutil.NewFakeClientWithFailures(baseClient, &testutil.FailureConfig{ - OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + OnGet: testutil.FailOnKeyName("test-toposerver", testutil.ErrNetworkTimeout), }) - reconciler := &EtcdReconciler{ + reconciler := &TopoServerReconciler{ Client: fakeClient, Scheme: scheme, } - err := reconciler.updateStatus(context.Background(), etcd) + err := reconciler.updateStatus(context.Background(), toposerver) if err == nil { t.Error("updateStatus() should error on Get failure") } diff --git a/pkg/resource-handler/controller/etcd/etcd_controller_test.go b/pkg/resource-handler/controller/toposerver/toposerver_controller_test.go similarity index 59% rename from pkg/resource-handler/controller/etcd/etcd_controller_test.go rename to pkg/resource-handler/controller/toposerver/toposerver_controller_test.go index a8d6696b..f2b83461 100644 --- a/pkg/resource-handler/controller/etcd/etcd_controller_test.go +++ b/pkg/resource-handler/controller/toposerver/toposerver_controller_test.go @@ -1,4 +1,4 @@ -package etcd +package toposerver_test import ( "slices" @@ -9,15 +9,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1" - "github.com/numtide/multigres-operator/pkg/resource-handler/controller/testutil" + "github.com/numtide/multigres-operator/pkg/resource-handler/controller/toposerver" + "github.com/numtide/multigres-operator/pkg/testutil" ) -func TestEtcdReconciler_Reconcile(t *testing.T) { +func TestTopoServerReconciler_Reconcile(t *testing.T) { t.Parallel() scheme := runtime.NewScheme() @@ -26,7 +28,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { _ = corev1.AddToScheme(scheme) tests := map[string]struct { - etcd *multigresv1alpha1.Etcd + toposerver *multigresv1alpha1.TopoServer existingObjects []client.Object failureConfig *testutil.FailureConfig // TODO: If wantErr is false but failureConfig is set, assertions may fail @@ -34,81 +36,86 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { // partial failures that don't 
prevent reconciliation success. wantErr bool wantRequeue bool - assertFunc func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) + assertFunc func(t *testing.T, c client.Client, toposerver *multigresv1alpha1.TopoServer) }{ ////---------------------------------------- /// Success //------------------------------------------ - "create all resources for new Etcd": { - etcd: &multigresv1alpha1.Etcd{ + "create all resources for new TopoServer": { + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, - assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + assertFunc: func(t *testing.T, c client.Client, toposerver *multigresv1alpha1.TopoServer) { // Verify all three resources were created sts := &appsv1.StatefulSet{} if err := c.Get(t.Context(), - types.NamespacedName{Name: "test-etcd", Namespace: "default"}, + types.NamespacedName{Name: "test-toposerver", Namespace: "default"}, sts); err != nil { t.Errorf("StatefulSet should exist: %v", err) } headlessSvc := &corev1.Service{} if err := c.Get(t.Context(), - types.NamespacedName{Name: "test-etcd-headless", Namespace: "default"}, + types.NamespacedName{Name: "test-toposerver-headless", Namespace: "default"}, headlessSvc); err != nil { t.Errorf("Headless Service should exist: %v", err) } clientSvc := &corev1.Service{} if err := c.Get(t.Context(), - types.NamespacedName{Name: "test-etcd", Namespace: "default"}, + types.NamespacedName{Name: "test-toposerver", Namespace: "default"}, clientSvc); err != nil { t.Errorf("Client Service should exist: %v", err) } // Verify defaults and finalizer - if *sts.Spec.Replicas != DefaultReplicas { + if *sts.Spec.Replicas != int32(3) { t.Errorf( "StatefulSet replicas = %d, want %d", *sts.Spec.Replicas, - DefaultReplicas, + int32(3), ) } - updatedEtcd := &multigresv1alpha1.Etcd{} - if err := c.Get(t.Context(), types.NamespacedName{Name: "test-etcd", Namespace: "default"}, updatedEtcd); err != nil { - t.Fatalf("Failed to get Etcd: %v", err) + updatedTopoServer := &multigresv1alpha1.TopoServer{} + if err := c.Get(t.Context(), types.NamespacedName{Name: "test-toposerver", Namespace: "default"}, updatedTopoServer); err != nil { + t.Fatalf("Failed to get TopoServer: %v", err) } - if !slices.Contains(updatedEtcd.Finalizers, finalizerName) { + if !slices.Contains( + updatedTopoServer.Finalizers, + "toposerver.multigres.com/finalizer", + ) { t.Errorf("Finalizer should be added") } }, }, "update existing resources": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "existing-etcd", + Name: "existing-toposerver", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{ - Replicas: int32Ptr(5), - Image: "quay.io/coreos/etcd:v3.5.15", + Spec: multigresv1alpha1.TopoServerChildSpec{ + TopoServerSpec: multigresv1alpha1.TopoServerSpec{ + Replicas: ptr.To(int32(5)), + Image: "quay.io/coreos/etcd:v3.5.15", + }, }, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "existing-etcd", + Name: "existing-toposerver", Namespace: "default", }, Spec: appsv1.StatefulSetSpec{ - Replicas: int32Ptr(3), // will be updated to 5 + Replicas: ptr.To(int32(3)), // will be updated to 
5 }, Status: appsv1.StatefulSetStatus{ Replicas: 3, @@ -117,21 +124,21 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "existing-etcd-headless", + Name: "existing-toposerver-headless", Namespace: "default", }, }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "existing-etcd", + Name: "existing-toposerver", Namespace: "default", }, }, }, - assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { + assertFunc: func(t *testing.T, c client.Client, toposerver *multigresv1alpha1.TopoServer) { sts := &appsv1.StatefulSet{} err := c.Get(t.Context(), types.NamespacedName{ - Name: "existing-etcd", + Name: "existing-toposerver", Namespace: "default", }, sts) if err != nil { @@ -150,111 +157,61 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { } }, }, - "etcd with cellName": { - etcd: &multigresv1alpha1.Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "etcd-zone1", - Namespace: "default", - }, - Spec: multigresv1alpha1.EtcdSpec{ - CellName: "zone1", - }, - }, - existingObjects: []client.Object{}, - assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { - sts := &appsv1.StatefulSet{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, - sts); err != nil { - t.Fatalf("Failed to get StatefulSet: %v", err) - } - if sts.Labels["multigres.com/cell"] != "zone1" { - t.Errorf( - "StatefulSet cell label = %s, want zone1", - sts.Labels["multigres.com/cell"], - ) - } - - headlessSvc := &corev1.Service{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: "etcd-zone1-headless", Namespace: "default"}, - headlessSvc); err != nil { - t.Fatalf("Failed to get headless Service: %v", err) - } - if headlessSvc.Labels["multigres.com/cell"] != "zone1" { - t.Errorf( - "Headless Service cell label = %s, want zone1", - headlessSvc.Labels["multigres.com/cell"], - ) - } - - clientSvc := &corev1.Service{} - if err := c.Get(t.Context(), - types.NamespacedName{Name: "etcd-zone1", Namespace: "default"}, - clientSvc); err != nil { - t.Fatalf("Failed to get client Service: %v", err) - } - if clientSvc.Labels["multigres.com/cell"] != "zone1" { - t.Errorf( - "Client Service cell label = %s, want zone1", - clientSvc.Labels["multigres.com/cell"], - ) - } - }, - }, "deletion with finalizer": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-deletion", + Name: "test-toposerver-deletion", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{ - &multigresv1alpha1.Etcd{ + &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-deletion", + Name: "test-toposerver-deletion", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, }, - assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { - updatedEtcd := &multigresv1alpha1.Etcd{} + assertFunc: func(t *testing.T, c client.Client, toposerver *multigresv1alpha1.TopoServer) { + updatedTopoServer := &multigresv1alpha1.TopoServer{} err := 
c.Get(t.Context(), - types.NamespacedName{Name: "test-etcd-deletion", Namespace: "default"}, - updatedEtcd) + types.NamespacedName{Name: "test-toposerver-deletion", Namespace: "default"}, + updatedTopoServer) if err == nil { t.Errorf( - "Etcd object should be deleted but still exists (finalizers: %v)", - updatedEtcd.Finalizers, + "TopoServer object should be deleted but still exists (finalizers: %v)", + updatedTopoServer.Finalizers, ) } }, }, "all replicas ready status": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-ready", + Name: "test-toposerver-ready", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{ - Replicas: int32Ptr(3), + Spec: multigresv1alpha1.TopoServerChildSpec{ + TopoServerSpec: multigresv1alpha1.TopoServerSpec{ + Replicas: ptr.To(int32(3)), + }, }, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-ready", + Name: "test-toposerver-ready", Namespace: "default", }, Spec: appsv1.StatefulSetSpec{ - Replicas: int32Ptr(3), + Replicas: ptr.To(int32(3)), }, Status: appsv1.StatefulSetStatus{ Replicas: 3, @@ -262,27 +219,27 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { }, }, }, - assertFunc: func(t *testing.T, c client.Client, etcd *multigresv1alpha1.Etcd) { - updatedEtcd := &multigresv1alpha1.Etcd{} + assertFunc: func(t *testing.T, c client.Client, toposerver *multigresv1alpha1.TopoServer) { + updatedTopoServer := &multigresv1alpha1.TopoServer{} if err := c.Get(t.Context(), - types.NamespacedName{Name: "test-etcd-ready", Namespace: "default"}, - updatedEtcd); err != nil { - t.Fatalf("Failed to get Etcd: %v", err) + types.NamespacedName{Name: "test-toposerver-ready", Namespace: "default"}, + updatedTopoServer); err != nil { + t.Fatalf("Failed to get TopoServer: %v", err) } - if !updatedEtcd.Status.Ready { - t.Error("Status.Ready should be true") + if updatedTopoServer.Status.Replicas != 3 { + t.Errorf("Status.Replicas = %d, want 3", updatedTopoServer.Status.Replicas) } - if updatedEtcd.Status.Replicas != 3 { - t.Errorf("Status.Replicas = %d, want 3", updatedEtcd.Status.Replicas) - } - if updatedEtcd.Status.ReadyReplicas != 3 { - t.Errorf("Status.ReadyReplicas = %d, want 3", updatedEtcd.Status.ReadyReplicas) + if updatedTopoServer.Status.ReadyReplicas != 3 { + t.Errorf( + "Status.ReadyReplicas = %d, want 3", + updatedTopoServer.Status.ReadyReplicas, + ) } - if len(updatedEtcd.Status.Conditions) == 0 { + if len(updatedTopoServer.Status.Conditions) == 0 { t.Error("Status.Conditions should not be empty") } else { - readyCondition := updatedEtcd.Status.Conditions[0] + readyCondition := updatedTopoServer.Status.Conditions[0] if readyCondition.Type != "Ready" { t.Errorf("Condition type = %s, want Ready", readyCondition.Type) } @@ -291,7 +248,10 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { } } - if !slices.Contains(updatedEtcd.Finalizers, finalizerName) { + if !slices.Contains( + updatedTopoServer.Finalizers, + "toposerver.multigres.com/finalizer", + ) { t.Errorf("Finalizer should be present") } }, @@ -300,32 +260,32 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { /// Error //------------------------------------------ "error on status update": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, 
- Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, failureConfig: &testutil.FailureConfig{ - OnStatusUpdate: testutil.FailOnObjectName("test-etcd", testutil.ErrInjected), + OnStatusUpdate: testutil.FailOnObjectName("test-toposerver", testutil.ErrInjected), }, wantErr: true, }, "error on Get StatefulSet in updateStatus (network error)": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-status", + Name: "test-toposerver-status", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-status", + Name: "test-toposerver-status", Namespace: "default", }, }, @@ -339,17 +299,17 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on client Service create": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, failureConfig: &testutil.FailureConfig{ OnCreate: func(obj client.Object) error { - if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd" { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-toposerver" { return testutil.ErrPermissionError } return nil @@ -358,37 +318,37 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on client Service Update": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-headless", + Name: "test-toposerver-headless", Namespace: "default", }, }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, }, }, failureConfig: &testutil.FailureConfig{ OnUpdate: func(obj client.Object) error { - if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd" { + if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-toposerver" { return testutil.ErrInjected } return nil @@ -397,31 +357,31 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on Get client Service (network error)": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-svc", + Name: "test-toposerver-svc", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-svc", + Name: "test-toposerver-svc", 
Namespace: "default", }, }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-svc-headless", + Name: "test-toposerver-svc-headless", Namespace: "default", }, }, }, failureConfig: &testutil.FailureConfig{ OnGet: testutil.FailOnNamespacedKeyName( - "test-etcd-svc", + "test-toposerver-svc", "default", testutil.ErrNetworkTimeout, ), @@ -429,17 +389,18 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on headless Service create": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, failureConfig: &testutil.FailureConfig{ OnCreate: func(obj client.Object) error { - if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd-headless" { + if svc, ok := obj.(*corev1.Service); ok && + svc.Name == "test-toposerver-headless" { return testutil.ErrPermissionError } return nil @@ -448,31 +409,32 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on headless Service Update": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, }, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-headless", + Name: "test-toposerver-headless", Namespace: "default", }, }, }, failureConfig: &testutil.FailureConfig{ OnUpdate: func(obj client.Object) error { - if svc, ok := obj.(*corev1.Service); ok && svc.Name == "test-etcd-headless" { + if svc, ok := obj.(*corev1.Service); ok && + svc.Name == "test-toposerver-headless" { return testutil.ErrInjected } return nil @@ -481,25 +443,25 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on Get headless Service (network error)": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, }, }, failureConfig: &testutil.FailureConfig{ OnGet: func(key client.ObjectKey) error { - if key.Name == "test-etcd-headless" { + if key.Name == "test-toposerver-headless" { return testutil.ErrNetworkTimeout } return nil @@ -508,12 +470,12 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on StatefulSet create": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, failureConfig: &testutil.FailureConfig{ @@ 
-527,24 +489,26 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on StatefulSet Update": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{ - Replicas: int32Ptr(5), + Spec: multigresv1alpha1.TopoServerChildSpec{ + TopoServerSpec: multigresv1alpha1.TopoServerSpec{ + Replicas: ptr.To(int32(5)), + }, }, }, existingObjects: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, Spec: appsv1.StatefulSetSpec{ - Replicas: int32Ptr(3), + Replicas: ptr.To(int32(3)), }, }, }, @@ -559,18 +523,18 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on Get StatefulSet (network error)": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, failureConfig: &testutil.FailureConfig{ OnGet: func(key client.ObjectKey) error { - if key.Name == "test-etcd" { + if key.Name == "test-toposerver" { return testutil.ErrNetworkTimeout } return nil @@ -579,56 +543,56 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { wantErr: true, }, "error on finalizer Update": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, failureConfig: &testutil.FailureConfig{ - OnUpdate: testutil.FailOnObjectName("test-etcd", testutil.ErrInjected), + OnUpdate: testutil.FailOnObjectName("test-toposerver", testutil.ErrInjected), }, wantErr: true, }, "deletion error on finalizer removal": { - etcd: &multigresv1alpha1.Etcd{ + toposerver: &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-del", + Name: "test-toposerver-del", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{ - &multigresv1alpha1.Etcd{ + &multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd-del", + Name: "test-toposerver-del", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, - Finalizers: []string{finalizerName}, + Finalizers: []string{"toposerver.multigres.com/finalizer"}, }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, }, failureConfig: &testutil.FailureConfig{ - OnUpdate: testutil.FailOnObjectName("test-etcd-del", testutil.ErrInjected), + OnUpdate: testutil.FailOnObjectName("test-toposerver-del", testutil.ErrInjected), }, wantErr: true, }, - "error on Get Etcd (network error)": { - etcd: &multigresv1alpha1.Etcd{ + "error on Get TopoServer (network error)": { + toposerver: 
&multigresv1alpha1.TopoServer{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-etcd", + Name: "test-toposerver", Namespace: "default", }, - Spec: multigresv1alpha1.EtcdSpec{}, + Spec: multigresv1alpha1.TopoServerChildSpec{}, }, existingObjects: []client.Object{}, failureConfig: &testutil.FailureConfig{ - OnGet: testutil.FailOnKeyName("test-etcd", testutil.ErrNetworkTimeout), + OnGet: testutil.FailOnKeyName("test-toposerver", testutil.ErrNetworkTimeout), }, wantErr: true, }, @@ -642,7 +606,7 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { baseClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(tc.existingObjects...). - WithStatusSubresource(&multigresv1alpha1.Etcd{}). + WithStatusSubresource(&multigresv1alpha1.TopoServer{}). Build() fakeClient := client.Client(baseClient) @@ -651,31 +615,32 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { fakeClient = testutil.NewFakeClientWithFailures(baseClient, tc.failureConfig) } - reconciler := &EtcdReconciler{ + reconciler := &toposerver.TopoServerReconciler{ Client: fakeClient, Scheme: scheme, } - // Create the Etcd resource if not in existing objects - etcdInExisting := false + // Create the TopoServer resource if not in existing objects + toposerverInExisting := false for _, obj := range tc.existingObjects { - if etcd, ok := obj.(*multigresv1alpha1.Etcd); ok && etcd.Name == tc.etcd.Name { - etcdInExisting = true + if toposerver, ok := obj.(*multigresv1alpha1.TopoServer); ok && + toposerver.Name == tc.toposerver.Name { + toposerverInExisting = true break } } - if !etcdInExisting { - err := fakeClient.Create(t.Context(), tc.etcd) + if !toposerverInExisting { + err := fakeClient.Create(t.Context(), tc.toposerver) if err != nil { - t.Fatalf("Failed to create Etcd: %v", err) + t.Fatalf("Failed to create TopoServer: %v", err) } } // Reconcile req := ctrl.Request{ NamespacedName: types.NamespacedName{ - Name: tc.etcd.Name, - Namespace: tc.etcd.Namespace, + Name: tc.toposerver.Name, + Namespace: tc.toposerver.Namespace, }, } @@ -697,13 +662,13 @@ func TestEtcdReconciler_Reconcile(t *testing.T) { // Run custom assertions if provided if tc.assertFunc != nil { - tc.assertFunc(t, fakeClient, tc.etcd) + tc.assertFunc(t, fakeClient, tc.toposerver) } }) } } -func TestEtcdReconciler_ReconcileNotFound(t *testing.T) { +func TestTopoServerReconciler_ReconcileNotFound(t *testing.T) { scheme := runtime.NewScheme() _ = multigresv1alpha1.AddToScheme(scheme) _ = appsv1.AddToScheme(scheme) @@ -713,7 +678,7 @@ func TestEtcdReconciler_ReconcileNotFound(t *testing.T) { WithScheme(scheme). 
Build() - reconciler := &EtcdReconciler{ + reconciler := &toposerver.TopoServerReconciler{ Client: fakeClient, Scheme: scheme, } @@ -721,7 +686,7 @@ func TestEtcdReconciler_ReconcileNotFound(t *testing.T) { // Reconcile non-existent resource req := ctrl.Request{ NamespacedName: types.NamespacedName{ - Name: "nonexistent-etcd", + Name: "nonexistent-toposerver", Namespace: "default", }, } diff --git a/pkg/resource-handler/go.mod b/pkg/resource-handler/go.mod index 6fb32708..b0be4516 100644 --- a/pkg/resource-handler/go.mod +++ b/pkg/resource-handler/go.mod @@ -4,63 +4,71 @@ go 1.25.0 require ( github.com/google/go-cmp v0.7.0 - github.com/numtide/multigres-operator/api v0.0.0-20251121230214-7690ea02d33a - k8s.io/api v0.34.1 - k8s.io/apimachinery v0.34.1 - k8s.io/client-go v0.34.1 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 - sigs.k8s.io/controller-runtime v0.22.3 + github.com/numtide/multigres-operator/api v0.0.0-20251122024957-4d8b69ea475b + github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251213002906-55493b734373 + k8s.io/api v0.34.3 + k8s.io/apimachinery v0.34.3 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + sigs.k8s.io/controller-runtime v0.22.4 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/jsonreference v0.21.4 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/gnostic-models v0.7.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - 
github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.4 // indirect + github.com/prometheus/procfs v0.19.2 // indirect + github.com/spf13/pflag v1.0.10 // indirect github.com/x448/float16 v0.8.4 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.9.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.14.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/apiextensions-apiserver v0.34.3 // indirect + k8s.io/client-go v0.34.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/pkg/resource-handler/go.sum b/pkg/resource-handler/go.sum index a5314cfd..727cb852 100644 --- a/pkg/resource-handler/go.sum +++ b/pkg/resource-handler/go.sum @@ -2,12 +2,11 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 
v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= @@ -16,26 +15,52 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= +github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod 
h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -45,25 +70,18 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -72,10 +90,12 @@ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFd github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/numtide/multigres-operator/api v0.0.0-20251121224426-e52b4dcf12a4 h1:dAn10WcNs54GOWj1dP5WbkN6MeoKrVc0ehJX4d1tNF8= -github.com/numtide/multigres-operator/api v0.0.0-20251121224426-e52b4dcf12a4/go.mod h1:A1bBmTxHr+362dGZ5G6u2S4xsP6enbgdUS/UJUOmKbc= -github.com/numtide/multigres-operator/api v0.0.0-20251121230214-7690ea02d33a h1:6dXoGDIM6NCE2VEcgswl0eSvrAfrlTkWtelUOAf9kJQ= -github.com/numtide/multigres-operator/api v0.0.0-20251121230214-7690ea02d33a/go.mod h1:A1bBmTxHr+362dGZ5G6u2S4xsP6enbgdUS/UJUOmKbc= +github.com/numtide/multigres-operator/api v0.0.0-20251122024957-4d8b69ea475b h1:RCEnXqK0D6jo9sSWsQ2kBspyVnv7FcfYFEO/Ovl1U74= +github.com/numtide/multigres-operator/api v0.0.0-20251122024957-4d8b69ea475b/go.mod h1:A1bBmTxHr+362dGZ5G6u2S4xsP6enbgdUS/UJUOmKbc= +github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251212191737-39bf45a780d3 h1:fg2eDv4bF2FpQjit7qfNEss27xm5nYmh5hDzBSwmFbQ= +github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251212191737-39bf45a780d3/go.mod h1:+NQa7dSvQqxhBOE9XcE9RWXLvOvNaw0keCc29Y7pjyQ= +github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251213002906-55493b734373 h1:B9uGjUsG0rMi+dGt2blEDpr8wbwnE/W1xcpuxZwvOYk= +github.com/numtide/multigres-operator/pkg/testutil v0.0.0-20251213002906-55493b734373/go.mod h1:+NQa7dSvQqxhBOE9XcE9RWXLvOvNaw0keCc29Y7pjyQ= github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= @@ -84,29 +104,24 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= 
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -117,8 +132,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -130,73 +145,72 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 
h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 
h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= -k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= -k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= -k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= -k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= -k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= -k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= +k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= +k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g= +k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0= +k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= +k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= +k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= -sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod 
h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
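
Note: for readers following the etcd→toposerver rename above, here is a minimal standalone sketch of the test pattern the renamed toposerver_controller_test.go relies on — build a fake client, wrap it with the failure-injection helpers from pkg/testutil, and drive a single Reconcile of a TopoServer. The test function name and the object name "example-toposerver" are hypothetical; the types, constructors, and helpers (TopoServerReconciler, TopoServerChildSpec, NewFakeClientWithFailures, FailOnKeyName, ErrNetworkTimeout) are taken from the diff itself, so treat this as an illustrative sketch rather than part of the patch.

package toposerver_test

import (
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	multigresv1alpha1 "github.com/numtide/multigres-operator/api/v1alpha1"
	"github.com/numtide/multigres-operator/pkg/resource-handler/controller/toposerver"
	"github.com/numtide/multigres-operator/pkg/testutil"
)

// TestExampleReconcileSketch (hypothetical name) mirrors the error-path cases
// in the table-driven test: an injected Get failure is expected to surface as
// a Reconcile error rather than being swallowed.
func TestExampleReconcileSketch(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = multigresv1alpha1.AddToScheme(scheme)
	_ = appsv1.AddToScheme(scheme)
	_ = corev1.AddToScheme(scheme)

	// A bare TopoServer with an empty child spec, as in the test fixtures.
	ts := &multigresv1alpha1.TopoServer{
		ObjectMeta: metav1.ObjectMeta{Name: "example-toposerver", Namespace: "default"},
		Spec:       multigresv1alpha1.TopoServerChildSpec{},
	}

	base := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(ts).
		WithStatusSubresource(&multigresv1alpha1.TopoServer{}).
		Build()

	// Wrap the fake client so Get calls for this object name fail with a
	// simulated network timeout, exactly as the failureConfig cases do.
	c := testutil.NewFakeClientWithFailures(base, &testutil.FailureConfig{
		OnGet: testutil.FailOnKeyName("example-toposerver", testutil.ErrNetworkTimeout),
	})

	r := &toposerver.TopoServerReconciler{Client: c, Scheme: scheme}
	_, err := r.Reconcile(t.Context(), ctrl.Request{
		NamespacedName: types.NamespacedName{Name: "example-toposerver", Namespace: "default"},
	})
	if err == nil {
		t.Error("expected injected Get failure to surface from Reconcile")
	}
}

The same scaffolding underlies the success-path cases in the table: drop the failure wrapper, reconcile, then Get the StatefulSet and Services back out of the fake client and assert on replicas, labels, finalizers, and status conditions as the renamed test does.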