diff --git a/.buildkite/testsuite.yml b/.buildkite/testsuite.yml
index f006c3acf..b65c50efb 100644
--- a/.buildkite/testsuite.yml
+++ b/.buildkite/testsuite.yml
@@ -213,49 +213,4 @@ steps:
         context: kuttl-v1
         report-slowest: 10
     soft_fail: true
-- group: Kuttl-V1-Nodepools Tests
-  key: kuttl-v1-nodepools
-  steps:
-  - agents:
-      queue: k8s-m6id12xlarge
-    command: ./ci/scripts/run-in-nix-docker.sh task ci:configure ci:test:kuttl-v1-nodepools
-    env:
-      LOG_LEVEL: trace
-      OTLP_DIR: /work/artifacts
-      OTLP_METRIC_INTERVAL: 5s
-    key: kuttl-v1-nodepools-run
-    label: Run Kuttl-V1-Nodepools Tests
-    notify:
-    - github_commit_status:
-        context: Kuttl-V1-Nodepools Tests
-    plugins:
-    - github.com/seek-oss/aws-sm-buildkite-plugin#v2.3.2:
-        json-to-env:
-        - secret-id: sdlc/prod/buildkite/github_api_token
-        - secret-id: sdlc/prod/buildkite/redpanda_sample_license
-        - secret-id: sdlc/prod/buildkite/slack_vbot_token
-    - https://$GITHUB_API_TOKEN@github.com/redpanda-data/step-slack-notify-buildkite-plugin.git#main:
-        channel_name: kubernetes-tests
-        conditions:
-          branches:
-          - main
-          failed: true
-        message: ':cloud: Kuttl-V1-Nodepools Tests Job Failed'
-        slack_token_env_var_name: SLACK_VBOT_TOKEN
-    soft_fail: false
-    timeout_in_minutes: 30
-  - continue_on_failure: true
-    wait: null
-  - agents:
-      queue: pipeline-uploader
-    allow_dependency_failure: true
-    command: ""
-    key: kuttl-v1-nodepools-parse
-    label: Parse and annotate Kuttl-V1-Nodepools Tests results
-    plugins:
-    - github.com/buildkite-plugins/junit-annotate-buildkite-plugin#v2.4.1:
-        artifacts: work/operator/tests/_e2e_with_flags_artifacts/kuttl-report.xml
-        context: kuttl-v1-nodepools
-        report-slowest: 10
-    soft_fail: true
diff --git a/.changes/unreleased/charts-redpanda-Deprecated-20251021-143834.yaml b/.changes/unreleased/charts-redpanda-Deprecated-20251021-143834.yaml
new file mode 100644
index 000000000..4e119631c
--- /dev/null
+++ b/.changes/unreleased/charts-redpanda-Deprecated-20251021-143834.yaml
@@ -0,0 +1,4 @@
+project: charts/redpanda
+kind: Deprecated
+body: '- `statefulset.sideCars.controllers.createRBAC` is deprecated and no longer respected. In most cases, setting this field to `false` would result in a broken deployment. RBAC may be controlled via `rbac.enabled` or per controller via `statefulset.sideCars.{pvcUnbinder,brokerDecommissioner}.enabled`.'
+time: 2025-10-21T14:38:34.206376-04:00
diff --git a/.changes/unreleased/charts-redpanda-Deprecated-20251021-144413.yaml b/.changes/unreleased/charts-redpanda-Deprecated-20251021-144413.yaml
new file mode 100644
index 000000000..56b691e97
--- /dev/null
+++ b/.changes/unreleased/charts-redpanda-Deprecated-20251021-144413.yaml
@@ -0,0 +1,4 @@
+project: charts/redpanda
+kind: Deprecated
+body: '`statefulset.sideCars.controllers.run` has been unused for many releases and is now deprecated. Individual controllers may be enabled/disabled by setting their enabled field: `statefulset.sideCars.pvcUnbinder.enabled`, `statefulset.sideCars.brokerDecommissioner.enabled`.'
+time: 2025-10-21T14:44:13.331483-04:00
diff --git a/Taskfile.yml b/Taskfile.yml
index aec705fa7..f90fdf2b1 100644
--- a/Taskfile.yml
+++ b/Taskfile.yml
@@ -161,6 +161,11 @@ tasks:
       vars:
         CLI_ARGS: '--load {{.CLI_ARGS}}'
 
+  build:charts:
+    desc: "Run helm dep build for all charts"
+    cmds:
+    - helm dep build ./charts/redpanda/chart
+
   test:unit:
     desc: "Run all unit tests (~5m)"
     vars:
@@ -179,6 +184,7 @@ tasks:
     - task: build:image
      vars:
        CLI_ARGS: '' # Don't forward CLI args to build:image
+    - task: build:charts
    vars:
      RUN: '{{ default `"^TestIntegration"` .RUN }}'
    cmds:
@@ -195,6 +201,7 @@ tasks:
    - task: build:image
      vars:
        CLI_ARGS: '' # Don't forward CLI args to build:image
+    - task: build:charts
    vars:
      RUN: '{{ default `"^TestAcceptance"` .RUN }}'
      GO_TEST_RUNNER: '{{default "go test" .GO_TEST_RUNNER}}'
diff --git a/acceptance/features/helm-chart.feature b/acceptance/features/helm-chart.feature
new file mode 100644
index 000000000..cae9e6c99
--- /dev/null
+++ b/acceptance/features/helm-chart.feature
@@ -0,0 +1,42 @@
+@operator:none
+Feature: Redpanda Helm Chart
+
+  Scenario: Tolerating Node Failure
+    Given I helm install "redpanda" "../charts/redpanda/chart" with values:
+      ```yaml
+      nameOverride: foobar
+      fullnameOverride: bazquux
+
+      statefulset:
+        sideCars:
+          image:
+            tag: dev
+            repository: localhost/redpanda-operator
+          pvcUnbinder:
+            enabled: true
+            unbindAfter: 15s
+          brokerDecommissioner:
+            enabled: true
+            decommissionAfter: 15s
+      ```
+    When I stop the Node running Pod "bazquux-2"
+    # Wait for the Pod to get evicted from the stopped node.
+    And Pod "bazquux-2" is eventually Pending
+    # Observe that it gets rescheduled.
+    Then Pod "bazquux-2" will eventually be Running
+    # And showcase that ghost brokers have been pruned.
+    And kubectl exec -it "bazquux-0" "rpk redpanda admin brokers list | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+      ```
+      ID MEMBERSHIP
+      0 active
+      1 active
+      3 active
+      ```
+    And kubectl exec -it "bazquux-0" "rpk redpanda admin brokers list --include-decommissioned | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+      ```
+      ID MEMBERSHIP
+      0 active
+      1 active
+      3 active
+      2 -
+      ```
diff --git a/acceptance/features/migration.feature b/acceptance/features/migration.feature
index 36380b145..5b5ff3874 100644
--- a/acceptance/features/migration.feature
+++ b/acceptance/features/migration.feature
@@ -2,7 +2,7 @@ Feature: Helm chart to Redpanda Operator migration
 
   @skip:gke @skip:aks @skip:eks
   Scenario: Migrate from a Helm chart release to a Redpanda custom resource
-    Given a Helm release named "redpanda-migration-example" of the "redpanda/redpanda" helm chart with the values:
+    Given I helm install "redpanda-migration-example" "../charts/redpanda/chart" with values:
     """
     # tag::helm-values[]
     fullnameOverride: name-override
diff --git a/acceptance/features/operator-upgrades.feature b/acceptance/features/operator-upgrades.feature
index 4e1d2ab12..10181befe 100644
--- a/acceptance/features/operator-upgrades.feature
+++ b/acceptance/features/operator-upgrades.feature
@@ -1,48 +1,8 @@
 @operator:none @vcluster
 Feature: Upgrading the operator
-  @skip:gke @skip:aks @skip:eks
-  Scenario: Operator upgrade from 2.4.5
-    Given I install local CRDs from "../operator/config/crd/bases"
-    And I install redpanda helm chart version "v2.4.5" with the values:
-    """
-    console:
-      enabled: false
-    """
-    And I apply Kubernetes manifest:
-    """
-    ---
-    apiVersion: cluster.redpanda.com/v1alpha2
-    kind: Redpanda
-    metadata:
-      name: operator-upgrade
-    spec:
-      clusterSpec:
-        console:
-          enabled: false
-        statefulset:
-          replicas: 1
-          sideCars:
-            image:
-              tag: dev
-              repository: localhost/redpanda-operator
-    """
-    # use just a Ready status check here since that's all the
-    # old operator supports
-    And cluster "operator-upgrade" is available
-    Then I can upgrade to the latest operator with the values:
-    """
-    image:
-      tag: dev
-      repository: localhost/redpanda-operator
-    crds:
-      experimental: true
-    """
-    # use the new status as this will eventually get set
-    And cluster "operator-upgrade" should be stable with 1 nodes
-
   @skip:gke @skip:aks @skip:eks
   Scenario: Operator upgrade from 25.1.3
-    And I install redpanda helm chart version "v25.1.3" with the values:
+    Given I helm install "redpanda-operator" "redpanda/operator" --version v25.1.3 with values:
     """
     crds:
       enabled: true
@@ -68,7 +28,7 @@ Feature: Upgrading the operator
     # use just a Ready status check here since that's all the
     # old operator supports
     And cluster "operator-upgrade" is available
-    Then I can upgrade to the latest operator with the values:
+    Then I can helm upgrade "redpanda-operator" "../operator/chart" with values:
     """
     image:
       tag: dev
diff --git a/acceptance/features/vectorized.feature b/acceptance/features/vectorized.feature
new file mode 100644
index 000000000..74809ecd9
--- /dev/null
+++ b/acceptance/features/vectorized.feature
@@ -0,0 +1,520 @@
+Feature: Vectorized Cluster
+
+  # Tests scaling a v1alpha1 Cluster up and down to verify decommissioning works correctly
+  @skip:gke @skip:aks @skip:eks
+  Scenario: Vectorized Scaling
+    Given I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: decommission
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      replicas: 3
+      resources:
+        requests:
+          cpu: "100m"
+          memory: 256Mi
+        limits:
+          cpu: "100m"
+          memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        pandaproxyApi:
+        - port: 8082
+        developerMode: true
+    """
+    And vectorized cluster "decommission" is available
+    And Pod "decommission-0" is eventually Running
+    Then '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '3'
+    And '{.status.currentReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '3'
+    And '{.status.readyReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '3'
+    And '{.spec.persistentVolumeClaimRetentionPolicy}' of StatefulSet.v1.apps "decommission" will be '{"whenDeleted": "Delete", "whenScaled": "Delete"}'
+    # TODO(chrisseto): kubectl apply --server-side doesn't merge partials of
+    # vectorized clusters, it just nulls out portions of it. We re-specify the
+    # entire spec to work around the limitation for now.
+    # First scale down from 3 to 2
+    When I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: decommission
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      replicas: 2 # <-- Change to replicas
+      resources:
+        requests:
+          cpu: "100m"
+          memory: 256Mi
+        limits:
+          cpu: "100m"
+          memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        pandaproxyApi:
+        - port: 8082
+        developerMode: true
+    """
+    Then vectorized cluster "decommission" is available
+    Then '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '2'
+    And '{.status.currentReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '2'
+    And '{.status.readyReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '2'
+    And kubectl exec -it "decommission-0" "rpk redpanda admin brokers list | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+    """
+    ID MEMBERSHIP
+    0 active
+    1 active
+    """
+    # Scale back up to 3
+    When I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: decommission
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      replicas: 3 # <-- Change to replicas
+      resources:
+        requests:
+          cpu: "100m"
+          memory: 256Mi
+        limits:
+          cpu: "100m"
+          memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        pandaproxyApi:
+        - port: 8082
+        developerMode: true
+    """
+    And vectorized cluster "decommission" is available
+    Then '{.status.readyReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '3'
+    And kubectl exec -it "decommission-0" "rpk redpanda admin brokers list | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+    """
+    ID MEMBERSHIP
+    0 active
+    1 active
+    3 active
+    """
+    # Scale down again to verify decommissioning works repeatedly
+    When I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: decommission
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      replicas: 2 # <-- Change to replicas
+      resources:
+        requests:
+          cpu: "100m"
+          memory: 256Mi
+        limits:
+          cpu: "100m"
+          memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        pandaproxyApi:
+        - port: 8082
+        developerMode: true
+    """
+    And vectorized cluster "decommission" is available
+    Then '{.status.readyReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "decommission" will be '2'
+    And kubectl exec -it "decommission-0" "rpk redpanda admin brokers list | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+    """
+    ID MEMBERSHIP
+    0 active
+    1 active
+    """
+
+  # Tests deleting nodepools from a v1alpha1 Cluster without scaling to zero first
+  # Verifies that the operator properly decommissions nodes incrementally (2->1->0->gone)
+  @skip:gke @skip:aks @skip:eks
+  Scenario: Vectorized NodePool Deletion
+    Given I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: nodepools-delete
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      nodePools:
+      - name: first
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "100m"
+            memory: 256Mi
+          limits:
+            cpu: "100m"
+            memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        developerMode: true
+      resources: {}
+    """
+    And vectorized cluster "nodepools-delete" is available
+    Then '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be '2'
+    And '{.status.currentReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be '2'
+    And '{.status.readyReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be '2'
+    And '{.status.nodePools.first}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be:
+    """
+    {"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}
+    """
+
+    # Add a second nodepool with slightly different resources
+    When I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: nodepools-delete
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      nodePools:
+      - name: first
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "100m"
+            memory: 256Mi
+          limits:
+            cpu: "100m"
+            memory: 256Mi
+      - name: second
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "101m"
+            memory: 257Mi
+          limits:
+            cpu: "101m"
+            memory: 257Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        developerMode: true
+    """
+    Then vectorized cluster "nodepools-delete" is available
+    And '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be '4'
+    And '{.status.nodePools.first}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be:
+    """
+    {"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}
+    """
+    And '{.status.nodePools.second}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be:
+    """
+    {"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}
+    """
+
+    # Delete the first nodepool by removing it from the spec entirely
+    # WITHOUT scaling to zero first - should go 2->1->0->gone
+    When I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: nodepools-delete
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      nodePools:
+      - name: second
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "101m"
+            memory: 257Mi
+          limits:
+            cpu: "101m"
+            memory: 257Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        developerMode: true
+        additionalCommandlineArguments:
+          dump-memory-diagnostics-on-alloc-failure-kind: all
+          abort-on-seastar-bad-alloc: ''
+      resources: {}
+    """
+    Then vectorized cluster "nodepools-delete" is available
+    And '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be '2'
+    And '{.status.nodePools}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepools-delete" will be:
+    """
+    {"second":{"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}}
+    """
+    And StatefulSet.v1.apps "nodepools-delete-first" is eventually deleted
+
+  # Tests scaling nodepools in a v1alpha1 Cluster
+  @skip:gke @skip:aks @skip:eks
+  Scenario: Vectorized NodePool Scaling
+    Given I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: nodepool-cluster
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      nodePools:
+      - name: nodepool1
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "100m"
+            memory: 256Mi
+          limits:
+            cpu: "100m"
+            memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        developerMode: true
+        additionalCommandlineArguments:
+          dump-memory-diagnostics-on-alloc-failure-kind: all
+          abort-on-seastar-bad-alloc: ''
+      resources: {}
+    """
+    And vectorized cluster "nodepool-cluster" is available
+    Then '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be '2'
+    And '{.status.currentReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be '2'
+    And '{.status.readyReplicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be '2'
+    And '{.status.restarting}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be 'false'
+    And '{.status.nodePools.nodepool1}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be:
+    """
+    {"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}
+    """
+
+    # Add a second nodepool
+    When I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: nodepool-cluster
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      nodePools:
+      - name: nodepool1
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "100m"
+            memory: 256Mi
+          limits:
+            cpu: "100m"
+            memory: 256Mi
+      - name: nodepool2
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "100m"
+            memory: 256Mi
+          limits:
+            cpu: "100m"
+            memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        developerMode: true
+        additionalCommandlineArguments:
+          dump-memory-diagnostics-on-alloc-failure-kind: all
+          abort-on-seastar-bad-alloc: ''
+      resources: {}
+    """
+    Then vectorized cluster "nodepool-cluster" is available
+    And '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be '4'
+    And '{.status.nodePools.nodepool1}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be:
+    """
+    {"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}
+    """
+    And '{.status.nodePools.nodepool2}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be:
+    """
+    {"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}
+    """
+
+    # Scale down the first nodepool to 0
+    When I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: nodepool-cluster
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      nodePools:
+      - name: nodepool1
+        replicas: 0
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "100m"
+            memory: 256Mi
+          limits:
+            cpu: "100m"
+            memory: 256Mi
+      - name: nodepool2
+        replicas: 2
+        storage: {}
+        cloudCacheStorage: {}
+        resources:
+          requests:
+            cpu: "100m"
+            memory: 256Mi
+          limits:
+            cpu: "100m"
+            memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        developerMode: true
+        additionalCommandlineArguments:
+          dump-memory-diagnostics-on-alloc-failure-kind: all
+          abort-on-seastar-bad-alloc: ''
+      resources: {}
+    """
+    Then vectorized cluster "nodepool-cluster" is available
+    And '{.status.replicas}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be '2'
+    And '{.status.nodePools.nodepool1}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be:
+    """
+    {"currentReplicas":0,"replicas":0,"readyReplicas":0,"restarting":false}
+    """
+    And '{.status.nodePools.nodepool2}' of Cluster.v1alpha1.redpanda.vectorized.io "nodepool-cluster" will be:
+    """
+    {"currentReplicas":2,"replicas":2,"readyReplicas":2,"restarting":false}
+    """
+
+  # Tests that v1alpha1 Clusters can tolerate node failures and properly decommission ghost brokers
+  @skip:gke @skip:aks @skip:eks
+  Scenario: Vectorized Tolerating Node Failures
+    Given I apply Kubernetes manifest:
+    """
+    ---
+    apiVersion: redpanda.vectorized.io/v1alpha1
+    kind: Cluster
+    metadata:
+      name: node-failure
+    spec:
+      image: ${DEFAULT_REDPANDA_REPO}
+      version: ${DEFAULT_REDPANDA_TAG}
+      replicas: 3
+      resources:
+        requests:
+          cpu: "100m"
+          memory: 256Mi
+        limits:
+          cpu: "100m"
+          memory: 256Mi
+      configuration:
+        rpcServer:
+          port: 33145
+        kafkaApi:
+        - port: 9092
+        adminApi:
+        - port: 9644
+        pandaproxyApi:
+        - port: 8082
+        developerMode: true
+    """
+    And vectorized cluster "node-failure" is available
+    When I stop the Node running Pod "node-failure-2"
+    And Pod "node-failure-2" is eventually Pending
+    Then Pod "node-failure-2" will eventually be Running
+    And kubectl exec -it "node-failure-0" "rpk redpanda admin brokers list | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+    """
+    ID MEMBERSHIP
+    0 active
+    1 active
+    3 active
+    """
+    And kubectl exec -it "node-failure-0" "rpk redpanda admin brokers list --include-decommissioned | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+    """
+    ID MEMBERSHIP
+    0 active
+    1 active
+    3 active
+    2 -
+    """
diff --git a/acceptance/main_test.go b/acceptance/main_test.go
index 62d414e60..ebb3e5919 100644
--- a/acceptance/main_test.go
+++ b/acceptance/main_test.go
@@ -93,7 +93,7 @@ var setupSuite = sync.OnceValues(func() (*framework.Suite, error) {
 		return
 	}
 	t.Log("Installing default Redpanda operator chart")
-	t.InstallLocalHelmChart(ctx, "../operator/chart", helm.InstallOptions{
+	t.InstallHelmChart(ctx, "../operator/chart", helm.InstallOptions{
 		Name:      "redpanda-operator",
 		Namespace: namespace,
 		Values: operatorchart.PartialValues{
@@ -127,6 +127,11 @@ var setupSuite = sync.OnceValues(func() (*framework.Suite, error) {
 					// broker list is pruned.
 					"--cluster-connection-timeout=500ms",
 					"--enable-shadowlinks",
+					// This flag affects vectorized controllers only and is
+					// required for the nodepool tests to work.
+					"--auto-delete-pvcs",
+					// Enable the STS Decommissioner for vectorized clusters.
+					"--enable-ghost-broker-decommissioner",
 				},
 			},
 		})
@@ -205,7 +210,7 @@ func OperatorTag(ctx context.Context, t framework.TestingT, args ...string) cont
 	}
 
 	t.Logf("Installing Redpanda operator chart: %q", name)
-	t.InstallLocalHelmChart(ctx, "../operator/chart", helm.InstallOptions{
+	t.InstallHelmChart(ctx, "../operator/chart", helm.InstallOptions{
 		Name:       "redpanda-operator",
 		Namespace:  t.Namespace(),
 		ValuesFile: filepath.Join("operator", fmt.Sprintf("%s.yaml", name)),
diff --git a/acceptance/steps/cluster.go b/acceptance/steps/cluster.go
index 29e8cf47d..96522e033 100644
--- a/acceptance/steps/cluster.go
+++ b/acceptance/steps/cluster.go
@@ -48,7 +48,9 @@ func checkV1ClusterAvailability(ctx context.Context, t framework.TestingT, clust
 		Status: corev1.ConditionTrue,
 	}, cluster.Status.Conditions)
 
-	hasCondition := hasConditionQuiescent
+	// V1 sets ObservedGeneration on the status as a whole. That must be
+	// observed to be == .Generation to ensure the status is up to date.
+	hasCondition := hasConditionQuiescent && cluster.Generation == cluster.Status.ObservedGeneration
 
 	t.Logf(`Checking cluster resource conditions contains "OperatorQuiescent"? %v`, hasCondition)
 
 	return hasCondition
@@ -166,6 +168,25 @@ func shutdownRandomClusterNode(ctx context.Context, t framework.TestingT, cluste
 	t.ShutdownNode(ctx, pod.Spec.NodeName)
 }
 
+func shutdownNodeOfPod(ctx context.Context, t framework.TestingT, podName string) {
+	t.ResourceKey(podName)
+
+	var pod corev1.Pod
+	require.NoError(t, t.Get(ctx, t.ResourceKey(podName), &pod))
+
+	var node corev1.Node
+	require.NoError(t, t.Get(ctx, t.ResourceKey(pod.Spec.NodeName), &node))
+
+	node.Spec.Taints = append(node.Spec.Taints, corev1.Taint{
+		Key:    "node.kubernetes.io/out-of-service",
+		Effect: corev1.TaintEffectNoExecute,
+	})
+
+	require.NoError(t, t.Update(ctx, &node))
+
+	t.ShutdownNode(ctx, pod.Spec.NodeName)
+}
+
 func deleteNotReadyKubernetesNodes(ctx context.Context, t framework.TestingT) {
 	var nodes corev1.NodeList
 	require.NoError(t, t.List(ctx, &nodes))
diff --git a/acceptance/steps/helm.go b/acceptance/steps/helm.go
index 1fb7e913c..91cbf84e2 100644
--- a/acceptance/steps/helm.go
+++ b/acceptance/steps/helm.go
@@ -12,40 +12,45 @@ package steps
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/cucumber/godog"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/rest"
 	"sigs.k8s.io/yaml"
 
 	framework "github.com/redpanda-data/redpanda-operator/harpoon"
 	"github.com/redpanda-data/redpanda-operator/pkg/helm"
 )
 
-// The unused parameter is meant to specify a Helm chart place (remote or local in the file system).
-func iInstallHelmRelease(ctx context.Context, t framework.TestingT, helmReleaseName, _ string, values *godog.DocString) {
+func iHelmInstall(ctx context.Context, t framework.TestingT, name, chart, version string, values *godog.DocString) {
+	// We don't really reference anything other than the redpanda repo, so just
+	// handle repos as a naive check here.
+	if strings.HasPrefix(chart, "redpanda/") {
+		t.AddHelmRepo(ctx, "redpanda", "https://charts.redpanda.com")
+	}
+
 	var valuesMap map[string]any
 	require.NoError(t, yaml.Unmarshal([]byte(values.Content), &valuesMap))
 
-	helmClient, err := helm.New(helm.Options{
-		KubeConfig: rest.CopyConfig(t.RestConfig()),
+	t.InstallHelmChart(ctx, chart, helm.InstallOptions{
+		Name:      name,
+		Version:   version,
+		Values:    valuesMap,
+		Namespace: t.Namespace(),
 	})
-	require.NoError(t, err)
-
-	require.NoError(t, helmClient.RepoAdd(ctx, "console", "https://charts.redpanda.com"))
+}
 
-	path := "../charts/redpanda/chart"
-	require.NoError(t, helmClient.DependencyBuild(ctx, path))
+func iHelmUpgrade(ctx context.Context, t framework.TestingT, name, chart, version string, values *godog.DocString) {
+	var valuesMap map[string]any
+	require.NoError(t, yaml.Unmarshal([]byte(values.Content), &valuesMap))
 
-	t.Logf("installing chart %q", path)
-	_, err = helmClient.Install(ctx, path, helm.InstallOptions{
-		Name:      helmReleaseName,
-		Namespace: t.Namespace(),
+	t.UpgradeHelmChart(ctx, name, chart, helm.UpgradeOptions{
+		Version:   version,
 		Values:    valuesMap,
+		Namespace: t.Namespace(),
 	})
-	require.NoError(t, err)
 }
 
 func iDeleteHelmReleaseSecret(ctx context.Context, t framework.TestingT, helmReleaseName string) {
diff --git a/acceptance/steps/helpers.go b/acceptance/steps/helpers.go
index 160c52c21..e17ffed68 100644
--- a/acceptance/steps/helpers.go
+++ b/acceptance/steps/helpers.go
@@ -32,9 +32,7 @@ import (
 	authenticationv1 "k8s.io/api/authentication/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/utils/ptr"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -651,20 +649,6 @@ func clientsForOperator(ctx context.Context, includeTLS bool, serviceAccountName
 	}
 }
 
-func removeAllFinalizers(ctx context.Context, t framework.TestingT, gvk schema.GroupVersionKind) {
-	list := &unstructured.UnstructuredList{}
-	list.SetGroupVersionKind(gvk)
-
-	// swallow errors for non-existent crds
-	if err := t.List(ctx, list); err == nil {
-		for i := range list.Items {
-			item := list.Items[i]
-			item.SetFinalizers(nil)
-			require.NoError(t, t.Update(ctx, &item))
-		}
-	}
-}
-
 func getVersion(t framework.TestingT, version string) string {
 	version = strings.TrimSpace(version)
 	if version != "" {
diff --git a/acceptance/steps/k8s.go b/acceptance/steps/k8s.go
index 640d43f3e..7d2658f51 100644
--- a/acceptance/steps/k8s.go
+++ b/acceptance/steps/k8s.go
@@ -12,6 +12,7 @@ package steps
 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"strings"
@@ -21,6 +22,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/util/jsonpath"
@@ -36,6 +38,15 @@ import (
 // for type assertions
 var _ client.Object = (client.Object)(nil)
 
+func podWillEventuallyBeInPhase(ctx context.Context, t framework.TestingT, podName string, phase string) {
+	require.EventuallyWithT(t, func(c *assert.CollectT) {
+		var pod corev1.Pod
+		require.NoError(c, t.Get(ctx, t.ResourceKey(podName), &pod))
+
+		require.Equal(c, corev1.PodPhase(phase), pod.Status.Phase)
+	}, 5*time.Minute, 5*time.Second)
+}
+
 func kubernetesObjectHasClusterOwner(ctx context.Context, t framework.TestingT, groupVersionKind, resourceName, clusterName string) {
 	var cluster redpandav1alpha2.Redpanda
@@ -76,6 +87,17 @@ func kubernetesObjectHasClusterOwner(ctx context.Context, t framework.TestingT,
 	t.Logf("Object has cluster owner reference for %q", clusterName)
 }
 
+func kubernetesObjectJSONPathMatchesDocString(ctx context.Context, t framework.TestingT, path, gvk, name string, expected *godog.DocString) {
+	kubernetesObjectJSONPathMatches(ctx, t, path, gvk, name, expected.Content)
+}
+
+func kubernetesObjectJSONPathMatches(ctx context.Context, t framework.TestingT, path, gvk, name, expected string) {
+	result := execJSONPath(ctx, t, path, gvk, name)
+	marshalled, err := json.Marshal(result)
+	require.NoError(t, err)
+	require.JSONEq(t, expected, string(marshalled))
+}
+
 type recordedVariable string
 
 func recordVariable(ctx context.Context, t framework.TestingT, jsonPath, groupVersionKind, resourceName, variableName string) context.Context {
@@ -163,7 +185,23 @@ func execJSONPath(ctx context.Context, t framework.TestingT, jsonPath, groupVers
 	return nil
 }
 
-func iExecInPodMatching(
+func execInPodEventuallyMatches(
+	ctx context.Context,
+	t framework.TestingT,
+	podName string,
+	cmd string,
+	expected *godog.DocString,
+) {
+	ctl, err := kube.FromRESTConfig(t.RestConfig())
+	require.NoError(t, err)
+
+	pod, err := kube.Get[corev1.Pod](ctx, ctl, kube.ObjectKey{Namespace: t.Namespace(), Name: podName})
+	require.NoErrorf(t, err, "Pod with name %q not found", podName)
+
+	execInPod(t, ctx, ctl, pod, cmd, expected)
+}
+
+func execInPodMatchingEventuallyMatches(
 	ctx context.Context,
 	t framework.TestingT,
 	cmd,
@@ -181,11 +219,39 @@ func iExecInPodMatching(
 
 	require.True(t, len(pods.Items) > 0, "selector %q found no Pods", selector.String())
 
-	var stdout bytes.Buffer
-	require.NoError(t, ctl.Exec(ctx, &pods.Items[0], kube.ExecOptions{
-		Command: []string{"sh", "-c", cmd},
-		Stdout:  &stdout,
-	}))
+	execInPod(t, ctx, ctl, &pods.Items[0], cmd, expected)
+}
 
-	assert.Equal(t, strings.TrimSpace(expected.Content), strings.TrimSpace(stdout.String()))
+func execInPod(
+	t framework.TestingT,
+	ctx context.Context,
+	ctl *kube.Ctl,
+	pod *corev1.Pod,
+	cmd string,
+	expected *godog.DocString,
+) {
+	require.EventuallyWithT(t, func(collect *assert.CollectT) {
+		var stdout bytes.Buffer
+		require.NoError(collect, ctl.Exec(ctx, pod, kube.ExecOptions{
+			Command: []string{"sh", "-c", cmd},
+			Stdout:  &stdout,
+		}))
+
+		assert.Equal(collect, strings.TrimSpace(expected.Content), strings.TrimSpace(stdout.String()))
+	}, 5*time.Minute, 5*time.Second)
+}
+
+func kubernetesResourceIsEventuallyDeleted(ctx context.Context, t framework.TestingT, groupVersionKind, resourceName string) {
+	gvk, _ := schema.ParseKindArg(groupVersionKind)
+	obj, err := t.Scheme().New(*gvk)
+	require.NoError(t, err)
+
+	t.Logf("Checking resource %s %q is eventually deleted", groupVersionKind, resourceName)
+	require.Eventually(t, func() bool {
+		err := t.Get(ctx, t.ResourceKey(resourceName), obj.(client.Object))
+		deleted := apierrors.IsNotFound(err)
+		t.Logf("Checking resource %s %q is deleted? %v", groupVersionKind, resourceName, deleted)
+		return deleted
+	}, 5*time.Minute, 5*time.Second, "Resource %s %q was never deleted", groupVersionKind, resourceName)
+	t.Logf("Resource %s %q successfully deleted", groupVersionKind, resourceName)
 }
diff --git a/acceptance/steps/operator.go b/acceptance/steps/operator.go
index d087e82d2..48d187abe 100644
--- a/acceptance/steps/operator.go
+++ b/acceptance/steps/operator.go
@@ -11,21 +11,14 @@ package steps
 
 import (
 	"context"
-	"fmt"
-	"os"
 	"regexp"
-	"strconv"
-	"strings"
 
-	"github.com/cucumber/godog"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	framework "github.com/redpanda-data/redpanda-operator/harpoon"
-	"github.com/redpanda-data/redpanda-operator/pkg/helm"
 )
 
 func operatorIsRunning(ctx context.Context, t framework.TestingT) {
@@ -93,70 +86,3 @@ func createClusterRoleBinding(ctx context.Context, serviceAccountName, clusterRo
 		}))
 	})
 }
-
-func iCanUpgradeToTheLatestOperatorWithTheValues(ctx context.Context, t framework.TestingT, values *godog.DocString) {
-	file, err := os.CreateTemp("", "values-*.yaml")
-	require.NoError(t, err)
-
-	_, err = file.Write([]byte(values.Content))
-	require.NoError(t, err)
-	require.NoError(t, file.Close())
-
-	t.Cleanup(func(ctx context.Context) {
-		require.NoError(t, os.RemoveAll(file.Name()))
-	})
-
-	t.UpgradeLocalHelmChart(ctx, "../operator/chart", "redpanda-operator", helm.UpgradeOptions{
-		Namespace:  t.Namespace(),
-		ValuesFile: file.Name(),
-		NoWait:     true,
-	})
-}
-
-func iInstallRedpandaHelmChartVersionWithTheValues(ctx context.Context, t framework.TestingT, version string, values *godog.DocString) {
-	file, err := os.CreateTemp("", "values-*.yaml")
-	require.NoError(t, err)
-
-	_, err = file.Write([]byte(values.Content))
-	require.NoError(t, err)
-	require.NoError(t, file.Close())
-
-	t.Cleanup(func(ctx context.Context) {
-		require.NoError(t, os.RemoveAll(file.Name()))
-	})
-
-	major, err := strconv.Atoi(strings.Split(strings.TrimPrefix(version, "v"), ".")[0])
-	require.NoError(t, err)
-
-	if major < 25 {
-		// these are needed for old versions of the operator
-		t.ApplyManifest(ctx, fmt.Sprintf("https://raw.githubusercontent.com/redpanda-data/redpanda-operator/refs/tags/%s/operator/config/crd/bases/toolkit.fluxcd.io/helm-controller.yaml", version))
-		t.ApplyManifest(ctx, fmt.Sprintf("https://raw.githubusercontent.com/redpanda-data/redpanda-operator/refs/tags/%s/operator/config/crd/bases/toolkit.fluxcd.io/source-controller.yaml", version))
-	}
-
-	t.Cleanup(func(ctx context.Context) {
-		// make sure we remove all finalizers for these or the CRD cleanup will get wedged
-		removeAllFinalizers(ctx, t, schema.GroupVersionKind{
-			Group:   "helm.toolkit.fluxcd.io",
-			Kind:    "HelmRelease",
-			Version: "v2beta2",
-		})
-		removeAllFinalizers(ctx, t, schema.GroupVersionKind{
-			Group:   "source.toolkit.fluxcd.io",
-			Kind:    "HelmRepository",
-			Version: "v1beta1",
-		})
-		removeAllFinalizers(ctx, t, schema.GroupVersionKind{
-			Group:   "source.toolkit.fluxcd.io",
-			Kind:    "HelmChart",
-			Version: "v1beta2",
-		})
-	})
-
-	t.InstallHelmChart(ctx, "https://charts.redpanda.com", "redpanda", "operator", helm.InstallOptions{
-		Name:       "redpanda-operator",
-		Namespace:  t.Namespace(),
-		Version:    version,
-		ValuesFile: file.Name(),
-	})
-}
diff --git a/acceptance/steps/register.go b/acceptance/steps/register.go
index 1798ad4ac..9af7f46b5 100644
--- a/acceptance/steps/register.go
+++ b/acceptance/steps/register.go
@@ -15,11 +15,16 @@ func init() {
 	// General scenario steps
 	framework.RegisterStep(`^(vectorized )?cluster "([^"]*)" is available$`, checkClusterAvailability)
 	framework.RegisterStep(`^I apply Kubernetes manifest:$`, iApplyKubernetesManifest)
-	framework.RegisterStep(`^I exec "([^"]+)" in a Pod matching "([^"]+)", it will output:$`, iExecInPodMatching)
+	framework.RegisterStep(`^I exec "([^"]+)" in a Pod matching "([^"]+)", it will output:$`, execInPodMatchingEventuallyMatches)
+	framework.RegisterStep(`^kubectl exec -it "([^"]+)" "([^"]+)" will eventually output:$`, execInPodEventuallyMatches)
+	framework.RegisterStep(`Pod "([^"]+)" (?:will|is) eventually(?: be)? (Running|Pending)`, podWillEventuallyBeInPhase)
 	framework.RegisterStep(`^I store "([^"]*)" of Kubernetes object with type "([^"]*)" and name "([^"]*)" as "([^"]*)"$`, recordVariable)
+	framework.RegisterStep(`^'([^']+)' of (\S+) "([^"]+)" (?:is|will be):$`, kubernetesObjectJSONPathMatchesDocString)
+	framework.RegisterStep(`^'([^']+)' of (\S+) "([^"]+)" (?:is|will be) '([^']+)'$`, kubernetesObjectJSONPathMatches)
 	framework.RegisterStep(`^the recorded value "([^"]*)" has the same value as "([^"]*)" of the Kubernetes object with type "([^"]*)" and name "([^"]*)"$`, assertVariableValue)
 	framework.RegisterStep(`^the recorded value "([^"]*)" is one less than "([^"]*)" of the Kubernetes object with type "([^"]*)" and name "([^"]*)"$`, assertVariableValueIncremented)
+	framework.RegisterStep(`^(\S+) "([^"]+)" is eventually deleted$`, kubernetesResourceIsEventuallyDeleted)
 
 	framework.RegisterStep(`^I enable feature "([^"]*)" on( vectorized)? cluster "([^"]*)"`, enableDevelopmentFeatureOn)
 	framework.RegisterStep(`^I enable "([^"]*)" logging for the "([^"]*)" logger on( vectorized)? cluster "([^"]*)"`, setLogLevelOn)
@@ -71,8 +76,17 @@ func init() {
 	framework.RegisterStep(`^"([^"]*)" service account has bounded "([^"]*)" regexp cluster role name$`, createClusterRoleBinding)
 	framework.RegisterStep(`^its metrics endpoint should accept https request with "([^"]*)" service account token$`, acceptServiceAccountMetricsRequest)
 
+	// Helm steps
+	// I helm install "release-name" "chart/path" with values:
+	// I can helm install "release-name" "chart/path" with values:
+	// I helm install "release-name" "chart/path" --version v1.2.3 with values:
+	framework.RegisterStep(`I(?: can)? helm install "([^"]+)" "([^"]+)"(?: --version (\S+))? with values:`, iHelmInstall)
+	// I helm upgrade "release-name" "chart/path" with values:
+	// I can helm upgrade "release-name" "chart/path" with values:
+	// I helm upgrade "release-name" "chart/path" --version v1.2.3 with values:
+	framework.RegisterStep(`I(?: can)? helm upgrade "([^"]+)" "([^"]+)"(?: --version (\S+))? with values:`, iHelmUpgrade)
+
 	// Helm migration scenario steps
-	framework.RegisterStep(`^a Helm release named "([^"]*)" of the "([^"]*)" helm chart with the values:$`, iInstallHelmRelease)
 	framework.RegisterStep(`^the Kubernetes object of type "([^"]*)" with name "([^"]*)" has an OwnerReference pointing to the cluster "([^"]*)"$`, kubernetesObjectHasClusterOwner)
 	framework.RegisterStep(`^the helm release for "([^"]*)" can be deleted by removing its stored secret$`, iDeleteHelmReleaseSecret)
 	framework.RegisterStep(`^the cluster "([^"]*)" is healthy$`, redpandaClusterIsHealthy)
@@ -94,11 +108,10 @@ func init() {
 	framework.RegisterStep(`^cluster "([^"]*)" should recover$`, checkClusterHealthy)
 	framework.RegisterStep(`^I physically shutdown a kubernetes node for cluster "([^"]*)"$`, shutdownRandomClusterNode)
 	framework.RegisterStep(`^I prune any kubernetes node that is now in a NotReady status$`, deleteNotReadyKubernetesNodes)
+	framework.RegisterStep(`I stop the Node running Pod "([^"]+)"`, shutdownNodeOfPod)
 	framework.RegisterStep(`^cluster "([^"]*)" has only (\d+) remaining nodes$`, checkClusterNodeCount)
 
 	// Operator upgrade scenario steps
-	framework.RegisterStep(`^I can upgrade to the latest operator with the values:$`, iCanUpgradeToTheLatestOperatorWithTheValues)
-	framework.RegisterStep(`^I install redpanda helm chart version "([^"]*)" with the values:$`, iInstallRedpandaHelmChartVersionWithTheValues)
 	framework.RegisterStep(`^I install local CRDs from "([^"]*)"`, iInstallLocalCRDs)
 
 	// Console scenario steps
diff --git a/charts/redpanda/chart/README.md b/charts/redpanda/chart/README.md
index e9bd39202..659a134b8 100644
--- a/charts/redpanda/chart/README.md
+++ b/charts/redpanda/chart/README.md
@@ -845,6 +845,8 @@ Number of Redpanda brokers (Redpanda Data recommends setting this to the number
 
 ### [statefulset.sideCars.controllers.createRBAC](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.controllers.createRBAC)
 
+DEPRECATED: Use `rbac.enabled` to control RBAC chart-wide or control RBAC by selectively enabling/disabling specific sidecar controllers. Setting this field has no effect.
+
 **Default:** `true`
 
 ### [statefulset.sideCars.controllers.enabled](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.controllers.enabled)
@@ -863,9 +865,11 @@ Number of Redpanda brokers (Redpanda Data recommends setting this to the number
 
 **Default:** `":9083"`
 
-### [statefulset.sideCars.controllers.run[0]](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.controllers.run[0])
+### [statefulset.sideCars.controllers.run](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.controllers.run)
+
+DEPRECATED: Please use statefulset.sideCars.brokerDecommissioner and statefulset.sideCars.pvcUnbinder. Setting this field has no effect.
-**Default:** `"all"`
+**Default:** `["all"]`
 
 ### [statefulset.sideCars.image.repository](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.image.repository)
diff --git a/charts/redpanda/chart/templates/_rbac.go.tpl b/charts/redpanda/chart/templates/_rbac.go.tpl
index 3b9ea2cad..8ed17d647 100644
--- a/charts/redpanda/chart/templates/_rbac.go.tpl
+++ b/charts/redpanda/chart/templates/_rbac.go.tpl
@@ -5,7 +5,7 @@
 {{- $state := (index .a 0) -}}
 {{- range $_ := (list 1) -}}
 {{- $_is_returning := false -}}
-{{- $mapping := (dict "files/sidecar.Role.yaml" (and $state.Values.rbac.enabled $state.Values.statefulset.sideCars.controllers.createRBAC) "files/pvcunbinder.Role.yaml" (and (get (fromJson (include "redpanda.Sidecars.ShouldCreateRBAC" (dict "a" (list $state.Values.statefulset.sideCars)))) "r") (get (fromJson (include "redpanda.Sidecars.PVCUnbinderEnabled" (dict "a" (list $state.Values.statefulset.sideCars)))) "r")) "files/decommission.Role.yaml" (and (get (fromJson (include "redpanda.Sidecars.ShouldCreateRBAC" (dict "a" (list $state.Values.statefulset.sideCars)))) "r") (get (fromJson (include "redpanda.Sidecars.BrokerDecommissionerEnabled" (dict "a" (list $state.Values.statefulset.sideCars)))) "r")) "files/rpk-debug-bundle.Role.yaml" (and $state.Values.rbac.enabled $state.Values.rbac.rpkDebugBundle)) -}}
+{{- $mapping := (dict "files/sidecar.Role.yaml" $state.Values.rbac.enabled "files/pvcunbinder.Role.yaml" (and $state.Values.rbac.enabled $state.Values.statefulset.sideCars.pvcUnbinder.enabled) "files/decommission.Role.yaml" (and $state.Values.rbac.enabled $state.Values.statefulset.sideCars.brokerDecommissioner.enabled) "files/rpk-debug-bundle.Role.yaml" (and $state.Values.rbac.enabled $state.Values.rbac.rpkDebugBundle)) -}}
 {{- $roles := (coalesce nil) -}}
 {{- range $file, $enabled := $mapping -}}
 {{- if (not $enabled) -}}
@@ -31,7 +31,7 @@
 {{- $state := (index .a 0) -}}
 {{- range $_ := (list 1) -}}
 {{- $_is_returning := false -}}
-{{- $mapping := (dict "files/pvcunbinder.ClusterRole.yaml" (and (get (fromJson (include "redpanda.Sidecars.ShouldCreateRBAC" (dict "a" (list $state.Values.statefulset.sideCars)))) "r") (get (fromJson (include "redpanda.Sidecars.PVCUnbinderEnabled" (dict "a" (list $state.Values.statefulset.sideCars)))) "r")) "files/decommission.ClusterRole.yaml" (and (get (fromJson (include "redpanda.Sidecars.ShouldCreateRBAC" (dict "a" (list $state.Values.statefulset.sideCars)))) "r") (get (fromJson (include "redpanda.Sidecars.BrokerDecommissionerEnabled" (dict "a" (list $state.Values.statefulset.sideCars)))) "r")) "files/rack-awareness.ClusterRole.yaml" (and $state.Values.rbac.enabled $state.Values.rackAwareness.enabled)) -}}
+{{- $mapping := (dict "files/pvcunbinder.ClusterRole.yaml" (and $state.Values.rbac.enabled $state.Values.statefulset.sideCars.pvcUnbinder.enabled) "files/decommission.ClusterRole.yaml" (and $state.Values.rbac.enabled $state.Values.statefulset.sideCars.brokerDecommissioner.enabled) "files/rack-awareness.ClusterRole.yaml" (and $state.Values.rbac.enabled $state.Values.rackAwareness.enabled)) -}}
 {{- $clusterRoles := (coalesce nil) -}}
 {{- range $file, $enabled := $mapping -}}
 {{- if (not $enabled) -}}
diff --git a/charts/redpanda/chart/templates/_statefulset.go.tpl b/charts/redpanda/chart/templates/_statefulset.go.tpl
index 0ff6236db..2d4412549 100644
--- a/charts/redpanda/chart/templates/_statefulset.go.tpl
+++ b/charts/redpanda/chart/templates/_statefulset.go.tpl
@@ -458,7 +458,7 @@
 {{- $pool := (index .a 1) -}}
 {{- range $_ := (list 1) -}}
 {{- $_is_returning := false -}}
-{{- $args := (list `/redpanda-operator` `sidecar` `--redpanda-yaml` `/etc/redpanda/redpanda.yaml` `--redpanda-cluster-namespace` $state.Release.Namespace `--redpanda-cluster-name` (get (fromJson (include "redpanda.Fullname" (dict "a" (list $state)))) "r") `--run-broker-probe` `--broker-probe-broker-url` (get (fromJson (include "redpanda.adminURLsCLI" (dict "a" (list $state)))) "r")) -}}
+{{- $args := (list `/redpanda-operator` `sidecar` `--redpanda-yaml` `/etc/redpanda/redpanda.yaml` `--redpanda-cluster-namespace` $state.Release.Namespace `--redpanda-cluster-name` (get (fromJson (include "redpanda.Fullname" (dict "a" (list $state)))) "r") (printf "--selector=helm.sh/chart=%s,app.kubernetes.io/name=%s,app.kubernetes.io/instance=%s" (get (fromJson (include "redpanda.ChartLabel" (dict "a" (list $state)))) "r") (get (fromJson (include "redpanda.Name" (dict "a" (list $state)))) "r") $state.Dot.Release.Name) `--run-broker-probe` `--broker-probe-broker-url` (get (fromJson (include "redpanda.adminURLsCLI" (dict "a" (list $state)))) "r")) -}}
 {{- if $pool.Statefulset.sideCars.brokerDecommissioner.enabled -}}
 {{- $args = (concat (default (list) $args) (default (list) (list `--run-decommissioner` (printf "--decommission-vote-interval=%s" $pool.Statefulset.sideCars.brokerDecommissioner.decommissionAfter) (printf "--decommission-requeue-timeout=%s" $pool.Statefulset.sideCars.brokerDecommissioner.decommissionRequeueTimeout) `--decommission-vote-count=2`))) -}}
 {{- end -}}
diff --git a/charts/redpanda/chart/templates/_values.go.tpl b/charts/redpanda/chart/templates/_values.go.tpl
index 28141c21a..40f4ec117 100644
--- a/charts/redpanda/chart/templates/_values.go.tpl
+++ b/charts/redpanda/chart/templates/_values.go.tpl
@@ -497,46 +497,6 @@
 {{- end -}}
 {{- end -}}
 
-{{- define "redpanda.Sidecars.PVCUnbinderEnabled" -}}
-{{- $s := (index .a 0) -}}
-{{- range $_ := (list 1) -}}
-{{- $_is_returning := false -}}
-{{- $_is_returning = true -}}
-{{- (dict "r" (and $s.controllers.enabled $s.pvcUnbinder.enabled)) | toJson -}}
-{{- break -}}
-{{- end -}}
-{{- end -}}
-
-{{- define "redpanda.Sidecars.BrokerDecommissionerEnabled" -}}
-{{- $s := (index .a 0) -}}
-{{- range $_ := (list 1) -}}
-{{- $_is_returning := false -}}
-{{- $_is_returning = true -}}
-{{- (dict "r" (and $s.controllers.enabled $s.brokerDecommissioner.enabled)) | toJson -}}
-{{- break -}}
-{{- end -}}
-{{- end -}}
-
-{{- define "redpanda.Sidecars.ShouldCreateRBAC" -}}
-{{- $s := (index .a 0) -}}
-{{- range $_ := (list 1) -}}
-{{- $_is_returning := false -}}
-{{- $_is_returning = true -}}
-{{- (dict "r" (or ((and $s.controllers.enabled $s.controllers.createRBAC)) (get (fromJson (include "redpanda.Sidecars.AdditionalSidecarControllersEnabled" (dict "a" (list $s)))) "r"))) | toJson -}}
-{{- break -}}
-{{- end -}}
-{{- end -}}
-
-{{- define "redpanda.Sidecars.AdditionalSidecarControllersEnabled" -}}
-{{- $s := (index .a 0) -}}
-{{- range $_ := (list 1) -}}
-{{- $_is_returning := false -}}
-{{- $_is_returning = true -}}
-{{- (dict "r" (or $s.pvcUnbinder.enabled $s.brokerDecommissioner.enabled)) | toJson -}}
-{{- break -}}
-{{- end -}}
-{{- end -}}
-
 {{- define "redpanda.Listeners.InUseServerCerts" -}}
 {{- $l := (index .a 0) -}}
 {{- $tls := (index .a 1) -}}
@@ -667,9 +627,9 @@
 {{- $seen := (dict) -}}
 {{- $deduped := (coalesce nil) -}}
 {{- range $_, $item := $items -}}
-{{- $_1039___ok_11 := (get (fromJson (include "_shims.dicttest" (dict "a" (list $seen $item.key false)))) "r") -}}
-{{- $_ := (index $_1039___ok_11 0) -}}
-{{- $ok_11 := (index $_1039___ok_11 1) -}}
+{{- $_1028___ok_11 := (get (fromJson (include "_shims.dicttest" (dict "a" (list $seen $item.key false)))) "r") -}}
+{{- $_ := (index $_1028___ok_11 0) -}}
+{{- $ok_11 := (index $_1028___ok_11 1) -}}
 {{- if $ok_11 -}}
 {{- continue -}}
 {{- end -}}
@@ -892,9 +852,9 @@
 {{- $name := (index .a 1) -}}
 {{- range $_ := (list 1) -}}
 {{- $_is_returning := false -}}
-{{- $_1327_cert_ok := (get (fromJson (include "_shims.dicttest" (dict "a" (list $m $name (dict "enabled" (coalesce nil) "caEnabled" false "applyInternalDNSNames" (coalesce nil) "duration" "" "issuerRef" (coalesce nil) "secretRef" (coalesce nil) "clientSecretRef" (coalesce nil)))))) "r") -}}
-{{- $cert := (index $_1327_cert_ok 0) -}}
-{{- $ok := (index $_1327_cert_ok 1) -}}
+{{- $_1316_cert_ok := (get (fromJson (include "_shims.dicttest" (dict "a" (list $m $name (dict "enabled" (coalesce nil) "caEnabled" false "applyInternalDNSNames" (coalesce nil) "duration" "" "issuerRef" (coalesce nil) "secretRef" (coalesce nil) "clientSecretRef" (coalesce nil)))))) "r") -}}
+{{- $cert := (index $_1316_cert_ok 0) -}}
+{{- $ok := (index $_1316_cert_ok 1) -}}
 {{- if (not $ok) -}}
 {{- $_ := (fail (printf "Certificate %q referenced, but not found in the tls.certs map" $name)) -}}
 {{- end -}}
@@ -1373,9 +1333,9 @@
 {{- $result := (dict) -}}
 {{- range $k, $v := $c -}}
 {{- if (not (empty $v)) -}}
-{{- $_1853___ok_15 := (get (fromJson (include "_shims.asnumeric" (dict "a" (list $v)))) "r") -}}
-{{- $_ := ((index $_1853___ok_15 0) | float64) -}}
-{{- $ok_15 := (index $_1853___ok_15 1) -}}
+{{- $_1842___ok_15 := (get (fromJson (include "_shims.asnumeric" (dict "a" (list $v)))) "r") -}}
+{{- $_ := ((index $_1842___ok_15 0) | float64) -}}
+{{- $ok_15 := (index $_1842___ok_15 1) -}}
 {{- if $ok_15 -}}
 {{- $_ := (set $result $k $v) -}}
 {{- else -}}{{- if (kindIs "bool" $v) -}}
@@ -1401,9 +1361,9 @@
 {{- $_is_returning := false -}}
 {{- $result := (dict) -}}
 {{- range $k, $v := $c -}}
-{{- $_1873_b_16_ok_17 := (get (fromJson (include "_shims.typetest" (dict "a" (list "bool" $v false)))) "r") -}}
-{{- $b_16 := (index $_1873_b_16_ok_17 0) -}}
-{{- $ok_17 := (index $_1873_b_16_ok_17 1) -}}
+{{- $_1862_b_16_ok_17 := (get (fromJson (include "_shims.typetest" (dict "a" (list "bool" $v false)))) "r") -}}
+{{- $b_16 := (index $_1862_b_16_ok_17 0) -}}
+{{- $ok_17 := (index $_1862_b_16_ok_17 1) -}}
 {{- if $ok_17 -}}
 {{- $_ := (set $result $k $b_16) -}}
 {{- continue -}}
@@ -1446,15 +1406,15 @@
 {{- $config := (index .a 1) -}}
 {{- range $_ := (list 1) -}}
 {{- $_is_returning := false -}}
-{{- $_1918___hasAccessKey := (get (fromJson (include "_shims.dicttest" (dict "a" (list $config "cloud_storage_access_key" (coalesce nil))))) "r") -}}
-{{- $_ := (index $_1918___hasAccessKey 0) -}}
-{{- $hasAccessKey := (index $_1918___hasAccessKey 1) -}}
-{{- $_1919___hasSecretKey := (get (fromJson (include "_shims.dicttest" (dict "a" (list $config "cloud_storage_secret_key" (coalesce nil))))) "r") -}}
-{{- $_ := (index $_1919___hasSecretKey 0) -}}
-{{- $hasSecretKey := (index $_1919___hasSecretKey 1) -}}
-{{- $_1920___hasSharedKey := (get (fromJson (include "_shims.dicttest" (dict "a" (list $config "cloud_storage_azure_shared_key" (coalesce nil))))) "r") -}}
-{{- $_ := (index $_1920___hasSharedKey 0) -}}
-{{- $hasSharedKey := (index $_1920___hasSharedKey 1) -}}
+{{- $_1907___hasAccessKey := (get (fromJson (include "_shims.dicttest" (dict "a" (list $config "cloud_storage_access_key" (coalesce nil))))) "r") -}}
+{{- $_ := (index $_1907___hasAccessKey 0) -}}
+{{- $hasAccessKey := (index $_1907___hasAccessKey 1) -}}
+{{- $_1908___hasSecretKey := (get (fromJson (include "_shims.dicttest" (dict "a" (list $config "cloud_storage_secret_key" (coalesce nil))))) "r") -}}
+{{- $_ := (index $_1908___hasSecretKey 0) -}}
+{{- $hasSecretKey := (index $_1908___hasSecretKey 1) -}}
+{{- $_1909___hasSharedKey := (get (fromJson (include "_shims.dicttest" (dict "a" (list $config "cloud_storage_azure_shared_key" (coalesce nil))))) "r") -}}
+{{- $_ := (index $_1909___hasSharedKey 0) -}}
+{{- $hasSharedKey := (index $_1909___hasSharedKey 1) -}}
 {{- $envvars := (coalesce nil) -}}
 {{- if (and (not $hasAccessKey) (get (fromJson (include "redpanda.SecretRef.IsValid" (dict "a" (list $tsc.accessKey)))) "r")) -}}
 {{- $envvars = (concat (default (list) $envvars) (list (mustMergeOverwrite (dict "name" "") (dict "name" "REDPANDA_CLOUD_STORAGE_ACCESS_KEY" "valueFrom" (get (fromJson (include "redpanda.SecretRef.AsSource" (dict "a" (list $tsc.accessKey)))) "r"))))) -}}
@@ -1477,12 +1437,12 @@
 {{- $c := (index .a 0) -}}
 {{- range $_ := (list 1) -}}
 {{- $_is_returning := false -}}
-{{- $_1956___containerExists := (get (fromJson (include "_shims.dicttest" (dict "a" (list $c "cloud_storage_azure_container" (coalesce nil))))) "r") -}}
-{{- $_ := (index $_1956___containerExists 0) -}}
-{{- $containerExists := (index $_1956___containerExists 1) -}}
-{{- $_1957___accountExists := (get (fromJson (include "_shims.dicttest" (dict "a" (list $c "cloud_storage_azure_storage_account" (coalesce nil))))) "r") -}}
-{{- $_ := (index $_1957___accountExists 0) -}}
-{{- $accountExists := (index $_1957___accountExists 1) -}}
+{{- $_1945___containerExists := (get (fromJson (include "_shims.dicttest" (dict "a" (list $c "cloud_storage_azure_container" (coalesce nil))))) "r") -}}
+{{- $_ := (index $_1945___containerExists 0) -}}
+{{- $containerExists := (index $_1945___containerExists 1) -}}
+{{- $_1946___accountExists := (get (fromJson (include "_shims.dicttest" (dict "a" (list $c "cloud_storage_azure_storage_account" (coalesce nil))))) "r") -}}
+{{- $_ := (index $_1946___accountExists 0) -}}
+{{- $accountExists := (index $_1946___accountExists 1) -}}
 {{- $_is_returning = true -}}
 {{- (dict "r" (and $containerExists $accountExists)) | toJson -}}
 {{- break -}}
@@ -1493,9 +1453,9 @@
 {{- $c := (index .a 0) -}}
 {{- range $_ := (list 1) -}}
 {{- $_is_returning := false -}}
-{{- $_1962_value_ok := (get (fromJson (include "_shims.dicttest" (dict "a" (list $c `cloud_storage_cache_size` (coalesce nil))))) "r") -}}
-{{- $value := (index $_1962_value_ok 0) -}}
-{{- $ok := (index $_1962_value_ok 1) -}}
+{{- $_1951_value_ok := (get (fromJson (include "_shims.dicttest" (dict "a" (list $c `cloud_storage_cache_size` (coalesce nil))))) "r") -}}
+{{- $value := (index $_1951_value_ok 0) -}}
+{{- $ok := (index $_1951_value_ok 1) -}}
 {{- if (not $ok) -}}
 {{- $_is_returning = true -}}
 {{- (dict "r" (coalesce nil)) | toJson -}}
diff --git a/charts/redpanda/chart/values.yaml b/charts/redpanda/chart/values.yaml
index f8275364e..bdcacf5e7 100644
--- a/charts/redpanda/chart/values.yaml
+++ b/charts/redpanda/chart/values.yaml
@@ -690,14 +690,17 @@ statefulset:
     configWatcher:
       enabled: true
     controllers:
-      # You must also enable RBAC, `rbac.enabled=true`, to deploy this sidecar
      enabled: false
+      # -- DEPRECATED: Use `rbac.enabled` to control RBAC chart-wide or control RBAC by selectively enabling/disabling specific sidecar controllers.
+      # Setting this field has no effect.
+      createRBAC: true
      healthProbeAddress: ":8085"
      metricsAddress: ":9082"
      pprofAddress: ":9083"
+      # -- DEPRECATED: Please use statefulset.sideCars.brokerDecommissioner and statefulset.sideCars.pvcUnbinder.
+      # Setting this field has no effect.
      run:
      - all
-      createRBAC: true
    initContainers:
      fsValidator:
        enabled: false
diff --git a/charts/redpanda/rbac.go b/charts/redpanda/rbac.go
index 5a47643f0..6894976e9 100644
--- a/charts/redpanda/rbac.go
+++ b/charts/redpanda/rbac.go
@@ -22,9 +22,9 @@ import (
 func Roles(state *RenderState) []*rbacv1.Role {
 	// path of static role definition -> Enabled
 	mapping := map[string]bool{
-		"files/sidecar.Role.yaml":          state.Values.RBAC.Enabled && state.Values.Statefulset.SideCars.Controllers.CreateRBAC,
-		"files/pvcunbinder.Role.yaml":      state.Values.Statefulset.SideCars.ShouldCreateRBAC() && state.Values.Statefulset.SideCars.PVCUnbinderEnabled(),
-		"files/decommission.Role.yaml":     state.Values.Statefulset.SideCars.ShouldCreateRBAC() && state.Values.Statefulset.SideCars.BrokerDecommissionerEnabled(),
+		"files/sidecar.Role.yaml":          state.Values.RBAC.Enabled,
+		"files/pvcunbinder.Role.yaml":      state.Values.RBAC.Enabled && state.Values.Statefulset.SideCars.PVCUnbinder.Enabled,
+		"files/decommission.Role.yaml":     state.Values.RBAC.Enabled && state.Values.Statefulset.SideCars.BrokerDecommissioner.Enabled,
 		"files/rpk-debug-bundle.Role.yaml": state.Values.RBAC.Enabled && state.Values.RBAC.RPKDebugBundle,
 	}
 
@@ -55,8 +55,8 @@ func Roles(state *RenderState) []*rbacv1.Role {
 func ClusterRoles(state *RenderState) []*rbacv1.ClusterRole {
 	// path of static ClusterRole definition -> Enabled
 	mapping := map[string]bool{
-		"files/pvcunbinder.ClusterRole.yaml":    state.Values.Statefulset.SideCars.ShouldCreateRBAC() && state.Values.Statefulset.SideCars.PVCUnbinderEnabled(),
-		"files/decommission.ClusterRole.yaml":   state.Values.Statefulset.SideCars.ShouldCreateRBAC() && state.Values.Statefulset.SideCars.BrokerDecommissionerEnabled(),
+		"files/pvcunbinder.ClusterRole.yaml":    state.Values.RBAC.Enabled && state.Values.Statefulset.SideCars.PVCUnbinder.Enabled,
+		"files/decommission.ClusterRole.yaml":   state.Values.RBAC.Enabled && state.Values.Statefulset.SideCars.BrokerDecommissioner.Enabled,
 		"files/rack-awareness.ClusterRole.yaml": state.Values.RBAC.Enabled && state.Values.RackAwareness.Enabled,
 	}
diff --git a/charts/redpanda/statefulset.go b/charts/redpanda/statefulset.go
index a2151b41e..cbc524132 100644
--- a/charts/redpanda/statefulset.go
+++ b/charts/redpanda/statefulset.go
@@ -815,6 +815,13 @@ func statefulSetContainerSidecar(state *RenderState, pool Pool) *corev1.Containe
 		state.Release.Namespace,
 		`--redpanda-cluster-name`,
 		Fullname(state),
+		// Values pulled from FullLabels.
+ fmt.Sprintf( + "--selector=helm.sh/chart=%s,app.kubernetes.io/name=%s,app.kubernetes.io/instance=%s", + ChartLabel(state), + Name(state), + state.Dot.Release.Name, + ), `--run-broker-probe`, `--broker-probe-broker-url`, // even though this is named "...URLs", it returns diff --git a/charts/redpanda/testdata/template-cases.golden.txtar b/charts/redpanda/testdata/template-cases.golden.txtar index 0cd7660c9..c00eaf470 100644 --- a/charts/redpanda/testdata/template-cases.golden.txtar +++ b/charts/redpanda/testdata/template-cases.golden.txtar @@ -932,6 +932,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -2252,6 +2253,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -3427,6 +3429,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -4803,6 +4806,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -6094,6 +6098,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -7630,6 +7635,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -9166,6 +9172,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -10605,6 +10612,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -13635,6 +13643,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -15152,6 +15161,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - 
$(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -16640,6 +16650,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -18081,6 +18092,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -19333,6 +19345,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -20574,6 +20587,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -22045,6 +22059,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -23487,6 +23502,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -24926,6 +24942,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -26419,6 +26436,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -27928,6 +27946,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -29437,6 +29456,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -30897,6 +30917,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -32407,6 +32428,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - 
--selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -33935,6 +33957,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -35463,6 +35486,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -36943,6 +36967,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -38471,6 +38496,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -39999,6 +40025,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -41527,6 +41554,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -43007,6 +43035,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -44483,6 +44512,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -45923,6 +45953,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -47362,6 +47393,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -48889,6 +48921,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - 
$(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -50334,6 +50367,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -53490,6 +53524,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -56431,6 +56466,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -57877,6 +57913,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).change-name.default.svc.cluster.local.:9644 @@ -59356,6 +59393,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -62310,6 +62348,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -63564,6 +63603,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -65158,6 +65198,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -66587,6 +66628,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -68127,6 +68169,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -69573,6 +69616,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -71045,6 +71089,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - 
--selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -75439,6 +75484,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -78482,6 +78528,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local:9644 @@ -79962,6 +80009,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -81474,6 +81522,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -82986,6 +83035,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -84475,6 +84525,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -85964,6 +86015,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -87418,6 +87470,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -88907,6 +88960,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -90396,6 +90450,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -91838,6 +91893,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - 
$(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -93327,6 +93383,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -94816,6 +94873,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -96258,6 +96316,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -97747,6 +97806,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -99236,6 +99296,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -100678,6 +100739,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -102167,6 +102229,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -103656,6 +103719,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -105098,6 +105162,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -106587,6 +106652,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -108076,6 +108142,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -109518,6 +109585,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - 
--selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -110763,6 +110831,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -120459,6 +120528,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -121999,6 +122069,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -123602,6 +123673,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -125145,6 +125217,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -126691,6 +126764,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -128191,6 +128265,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -129785,6 +129860,7 @@ spec: - default - --redpanda-cluster-name - also-not-redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=not-redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).also-not-redpanda.default.svc.cluster.local.:9644 @@ -131224,6 +131300,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -132663,6 +132740,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -134115,6 +134193,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - 
$(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -135669,6 +135748,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -137182,6 +137262,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -138671,6 +138752,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -140160,6 +140242,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -141602,6 +141685,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -143091,6 +143175,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -144580,6 +144665,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -146029,6 +146115,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 @@ -147630,6 +147717,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9643 @@ -148883,6 +148971,7 @@ spec: - default - --redpanda-cluster-name - redpanda + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).redpanda.default.svc.cluster.local.:9644 diff --git a/charts/redpanda/values.go b/charts/redpanda/values.go index 4aeba491d..2215121ae 100644 --- a/charts/redpanda/values.go +++ b/charts/redpanda/values.go @@ -860,32 +860,21 @@ type Sidecars struct { Enabled bool `json:"enabled"` } `json:"configWatcher"` Controllers struct { - DeprecatedImage *Image `json:"image"` - Enabled bool `json:"enabled"` - CreateRBAC bool `json:"createRBAC"` - HealthProbeAddress string 
`json:"healthProbeAddress"` - MetricsAddress string `json:"metricsAddress"` - PprofAddress string `json:"pprofAddress"` - Run []string `json:"run"` + DeprecatedImage *Image `json:"image"` + // Enabled used to act as a global toggle for sidecar controllers. It + // was confusing and is no longer used. + // Deprecated. + Enabled bool `json:"enabled"` + CreateRBAC bool `json:"createRBAC"` + HealthProbeAddress string `json:"healthProbeAddress"` + MetricsAddress string `json:"metricsAddress"` + PprofAddress string `json:"pprofAddress"` + // Run used to be a string list of additional controllers to run. It is no longer used. + // Deprecated. + Run []string `json:"run"` } `json:"controllers"` } -func (s *Sidecars) PVCUnbinderEnabled() bool { - return s.Controllers.Enabled && s.PVCUnbinder.Enabled -} - -func (s *Sidecars) BrokerDecommissionerEnabled() bool { - return s.Controllers.Enabled && s.BrokerDecommissioner.Enabled -} - -func (s *Sidecars) ShouldCreateRBAC() bool { - return (s.Controllers.Enabled && s.Controllers.CreateRBAC) || s.AdditionalSidecarControllersEnabled() -} - -func (s *Sidecars) AdditionalSidecarControllersEnabled() bool { - return s.PVCUnbinder.Enabled || s.BrokerDecommissioner.Enabled -} - type Listeners struct { Admin ListenerConfig[NoAuth] `json:"admin" jsonschema:"required"` HTTP ListenerConfig[HTTPAuthenticationMethod] `json:"http" jsonschema:"required"` diff --git a/gen/pipeline/pipeline.go b/gen/pipeline/pipeline.go index a4f5a8056..62f03a8ba 100644 --- a/gen/pipeline/pipeline.go +++ b/gen/pipeline/pipeline.go @@ -65,12 +65,6 @@ var suites = []TestSuite{ Condition: `build.pull_request.labels includes "run-kuttl-v1"`, Retry: ptr.To(3), }, - { - Name: "kuttl-v1-nodepools", - Required: true, - Timeout: 30 * time.Minute, - JUnitPattern: ptr.To("work/operator/tests/_e2e_with_flags_artifacts/kuttl-report.xml"), - }, } func Cmd() *cobra.Command { diff --git a/harpoon/internal/testing/helm.go b/harpoon/internal/testing/helm.go index 1a5337879..7ffc4ed81 100644 --- a/harpoon/internal/testing/helm.go +++ b/harpoon/internal/testing/helm.go @@ -11,86 +11,59 @@ package testing import ( "context" + "strings" "github.com/stretchr/testify/require" - "k8s.io/client-go/rest" "github.com/redpanda-data/redpanda-operator/pkg/helm" ) -func (t *TestingT) InstallHelmChart(ctx context.Context, url, repo, chart string, options helm.InstallOptions) { - helmClient, err := helm.New(helm.Options{ - KubeConfig: rest.CopyConfig(t.restConfig), - }) - require.NoError(t, err) - require.NoError(t, helmClient.RepoAdd(ctx, repo, url)) - require.NotEqual(t, "", options.Namespace, "namespace must not be blank") - require.NotEqual(t, "", options.Name, "name must not be blank") - - options.CreateNamespace = true - - t.Logf("installing chart %q", repo+"/"+chart) - _, err = helmClient.Install(ctx, repo+"/"+chart, options) - require.NoError(t, err) - - t.Cleanup(func(ctx context.Context) { - t.Logf("uninstalling chart %q", repo+"/"+chart) - require.NoError(t, helmClient.Uninstall(ctx, helm.Release{ - Name: options.Name, - Namespace: options.Namespace, - })) - }) +// AddHelmRepo adds a helm repository by name and URL.
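+// +// For example (repo name and URL are illustrative): +// +// t.AddHelmRepo(ctx, "jetstack", "https://charts.jetstack.io")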
+func (t *TestingT) AddHelmRepo(ctx context.Context, name, url string) { + require.NoError(t, t.helmClient.RepoAdd(ctx, name, url)) } -func (t *TestingT) UpgradeHelmChart(ctx context.Context, repo, chart, release string, options helm.UpgradeOptions) { - helmClient, err := helm.New(helm.Options{ - KubeConfig: rest.CopyConfig(t.restConfig), - }) - require.NoError(t, err) - require.NotEqual(t, "", options.Namespace, "namespace must not be blank") - - t.Logf("upgrading chart %q", repo+"/"+chart) - _, err = helmClient.Upgrade(ctx, release, repo+"/"+chart, options) - require.NoError(t, err) -} - -func (t *TestingT) InstallLocalHelmChart(ctx context.Context, path string, options helm.InstallOptions, deps ...helm.Dependency) { - helmClient, err := helm.New(helm.Options{ - KubeConfig: rest.CopyConfig(t.restConfig), - }) - require.NoError(t, err) +// InstallHelmChart installs a helm chart from either a local path or a repo reference. +// +// t.InstallHelmChart("../charts/redpanda/chart") // Local chart +// t.InstallHelmChart("jetstack/cert-manager") // From repo +func (t *TestingT) InstallHelmChart(ctx context.Context, chart string, options helm.InstallOptions) { require.NotEqual(t, "", options.Namespace, "namespace must not be blank") require.NotEqual(t, "", options.Name, "name must not be blank") options.CreateNamespace = true - for _, dep := range deps { - require.NoError(t, helmClient.RepoAdd(ctx, dep.Name, dep.Repository)) - } - - require.NoError(t, helmClient.DependencyBuild(ctx, path)) - - t.Logf("installing chart %q", path) - _, err = helmClient.Install(ctx, path, options) + t.Logf("installing chart %q", chart) + rel, err := t.helmClient.Install(ctx, chart, options) require.NoError(t, err) t.Cleanup(func(ctx context.Context) { - t.Logf("uninstalling chart %q", path) - require.NoError(t, helmClient.Uninstall(ctx, helm.Release{ - Name: options.Name, + t.Logf("uninstalling chart %q", chart) + err := t.helmClient.Uninstall(ctx, helm.Release{ + Name: rel.Name, Namespace: options.Namespace, - })) + }) + + // If the uninstall fails because the release no longer exists, swallow the error. This + // is teardown code, so it's not critical for it to run without issue. + // Some test cases (namely migration from chart -> operator) will manually + // remove the helm release, in which case we expect to see this error. + // Plumbing in the ability to skip this cleanup is quite difficult, as it + // would need to come from a different step. + if err != nil && !strings.Contains(err.Error(), "release: not found") { + require.NoError(t, err) + } }) } -func (t *TestingT) UpgradeLocalHelmChart(ctx context.Context, path, release string, options helm.UpgradeOptions) { - helmClient, err := helm.New(helm.Options{ - KubeConfig: rest.CopyConfig(t.restConfig), - }) - require.NoError(t, err) +// UpgradeHelmChart upgrades a helm chart from either a local path or a repo reference.
+// +// t.UpgradeHelmChart("../charts/redpanda/chart") // Local chart +// t.UpgradeHelmChart("jetstack/cert-manager") // From repo +func (t *TestingT) UpgradeHelmChart(ctx context.Context, release, chart string, options helm.UpgradeOptions) { require.NotEqual(t, "", options.Namespace, "namespace must not be blank") - t.Logf("upgrading local chart %q", path) - _, err = helmClient.Upgrade(ctx, release, path, options) + t.Logf("upgrading chart %q", chart) + _, err := t.helmClient.Upgrade(ctx, release, chart, options) require.NoError(t, err) } diff --git a/harpoon/internal/testing/testing.go b/harpoon/internal/testing/testing.go index 455748c94..34fcd8dfb 100644 --- a/harpoon/internal/testing/testing.go +++ b/harpoon/internal/testing/testing.go @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/redpanda-data/redpanda-operator/pkg/helm" "github.com/redpanda-data/redpanda-operator/pkg/kube" "github.com/redpanda-data/redpanda-operator/pkg/vcluster" ) @@ -99,6 +100,7 @@ type TestingT struct { client.Client *Cleaner + helmClient *helm.Client lastError string activeSubtest *TestingT restConfig *rest.Config @@ -116,10 +118,16 @@ func NewTesting(ctx context.Context, options *TestingOptions, cleaner *Cleaner) restConfig, err := restConfig(options.KubectlOptions) require.NoError(t, err) + helmClient, err := helm.New(helm.Options{ + KubeConfig: rest.CopyConfig(restConfig), + }) + require.NoError(t, err) + return &TestingT{ TestingT: t, Client: client, Cleaner: cleaner, + helmClient: helmClient, restConfig: restConfig, options: options, } diff --git a/harpoon/providers/k3d.go b/harpoon/providers/k3d.go index 6e1e609e0..001d029ff 100644 --- a/harpoon/providers/k3d.go +++ b/harpoon/providers/k3d.go @@ -68,10 +68,8 @@ func (p *K3DProvider) Teardown(_ context.Context) error { } func (p *K3DProvider) LoadImages(_ context.Context, images []string) error { - for _, image := range images { - if err := p.cluster.ImportImage(image); err != nil { - return err - } + if err := p.cluster.ImportImage(images...); err != nil { + return err } return nil } diff --git a/harpoon/types.go b/harpoon/types.go index 85c689f31..ce1312bce 100644 --- a/harpoon/types.go +++ b/harpoon/types.go @@ -57,10 +57,9 @@ type TestingT interface { MarkVariant(variant string) Variant() string - InstallHelmChart(ctx context.Context, url, repo, chart string, options helm.InstallOptions) - UpgradeHelmChart(ctx context.Context, repo, chart, release string, options helm.UpgradeOptions) - InstallLocalHelmChart(ctx context.Context, path string, options helm.InstallOptions, deps ...helm.Dependency) - UpgradeLocalHelmChart(ctx context.Context, path, release string, options helm.UpgradeOptions) + AddHelmRepo(ctx context.Context, name, url string) + InstallHelmChart(ctx context.Context, chart string, options helm.InstallOptions) + UpgradeHelmChart(ctx context.Context, release, chart string, options helm.UpgradeOptions) Namespace() string RestConfig() *rest.Config diff --git a/operator/config/e2e-tests-with-flags/kustomization.yaml b/operator/config/e2e-tests-with-flags/kustomization.yaml deleted file mode 100644 index 5918a2495..000000000 --- a/operator/config/e2e-tests-with-flags/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../e2e-tests -patches: - - patch: |- - - op: add - path: /spec/template/spec/containers/0/args/- - value: --auto-delete-pvcs - target: - group: apps - version: v1 - kind: Deployment - 
name: controller-manager diff --git a/operator/internal/controller/decommissioning/statefulset_decommissioner_test.go b/operator/internal/controller/decommissioning/statefulset_decommissioner_test.go index 4e654fbff..0ffa59318 100644 --- a/operator/internal/controller/decommissioning/statefulset_decommissioner_test.go +++ b/operator/internal/controller/decommissioning/statefulset_decommissioner_test.go @@ -239,6 +239,12 @@ func (s *StatefulSetDecommissionerSuite) installChart(name string, overrides map values := map[string]any{ "statefulset": map[string]any{ "replicas": 1, + "sideCars": map[string]any{ + "image": map[string]any{ + "repository": "localhost/redpanda-operator", + "tag": "dev", + }, + }, }, "console": map[string]any{ "enabled": false, diff --git a/operator/internal/lifecycle/testdata/cases.pools.golden.txtar b/operator/internal/lifecycle/testdata/cases.pools.golden.txtar index 425815037..d522a186b 100644 --- a/operator/internal/lifecycle/testdata/cases.pools.golden.txtar +++ b/operator/internal/lifecycle/testdata/cases.pools.golden.txtar @@ -158,6 +158,7 @@ - basic-test - --redpanda-cluster-name - basic-test + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=basic-test - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).basic-test.basic-test.svc.cluster.local.:9644 @@ -519,6 +520,7 @@ - compat-test - --redpanda-cluster-name - compat-test + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=compat-test - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).compat-test.compat-test.svc.cluster.local.:9644 @@ -885,6 +887,7 @@ - compat-test - --redpanda-cluster-name - compat-test + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=compat-test - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).compat-test.compat-test.svc.cluster.local.:9644 @@ -1306,6 +1309,7 @@ - nodepool-basic-test - --redpanda-cluster-name - nodepool-basic-test + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=nodepool-basic-test - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 @@ -1668,6 +1672,7 @@ - nodepool-basic-test - --redpanda-cluster-name - nodepool-basic-test + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=nodepool-basic-test - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 @@ -2028,6 +2033,7 @@ - nodepool-basic-test - --redpanda-cluster-name - nodepool-basic-test + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=nodepool-basic-test - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 diff --git a/operator/kuttl-test-with-flags.yaml b/operator/kuttl-test-with-flags.yaml deleted file mode 100644 index a0f900b16..000000000 --- a/operator/kuttl-test-with-flags.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestSuite -startKIND: true -kindContainers: - - localhost/redpanda-operator:dev - - localhost/redpanda:dev -testDirs: - - ./tests/e2e-with-flags -kindConfig: ./kind-for-cloud.yaml -kindNodeCache: false -commands: - - command: kubectl taint node 
kind-control-plane node-role.kubernetes.io/control-plane- - - command: "mkdir -p tests/_e2e_with_flags_artifacts" - - command: "./hack/install-cert-manager.sh tests/_e2e_with_flags_artifacts" - background: true - ignoreFailure: true - - command: "kubectl create -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/e23ff77fceba6a5d9f190f5d1a123c87701dc964/bundle.yaml" - background: true - ignoreFailure: true - - command: "sh -c 'until kustomize build config/e2e-tests-with-flags 2>> tests/_e2e_with_flags_artifacts/kustomize-output.txt | kubectl apply --server-side -f - 1>> tests/_e2e_with_flags_artifacts/kubectl-output.txt 2>> tests/_e2e_with_flags_artifacts/kubectl-error-output.txt; do sleep 0.5; done'" - background: true - - command: "./hack/wait-for-webhook-ready.sh" -artifactsDir: tests/_e2e_with_flags_artifacts -timeout: 330 -reportFormat: xml -parallel: 2 -namespace: redpanda-system -suppress: - - events diff --git a/operator/pkg/functional/map.go b/operator/pkg/functional/map.go index 323ec8637..3004712d2 100644 --- a/operator/pkg/functional/map.go +++ b/operator/pkg/functional/map.go @@ -63,6 +63,7 @@ func deepCopyElements(v []any) []any { // including merging arrays of matching keyed arrays. Note that // the array merging behavior is *not* the same as that of Helm // so this should not be used as a replacement for that. +// Precedence is given to `second`. func MergeMaps(first, second map[string]any) map[string]any { merged := deepCopyMap(first) for k, v := range second { @@ -72,11 +73,13 @@ func MergeMaps(first, second map[string]any) map[string]any { // the types must match, otherwise we can't merge them if vmap, ok := v.(map[string]any); ok { merged[k] = MergeMaps(cast, deepCopyMap(vmap)) + continue } case []any: // the types must match, otherwise we can't merge them if varray, ok := v.([]any); ok { merged[k] = append(cast, deepCopyElements(varray)) + continue } } } diff --git a/operator/pkg/functional/map_test.go b/operator/pkg/functional/map_test.go new file mode 100644 index 000000000..f1de0b146 --- /dev/null +++ b/operator/pkg/functional/map_test.go @@ -0,0 +1,119 @@ +// Copyright 2025 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package functional_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/redpanda-data/redpanda-operator/operator/pkg/functional" +) + +func TestMergeMaps(t *testing.T) { + cases := []struct { + First map[string]any + Second map[string]any + Expect map[string]any + }{ + { + First: map[string]any{}, + Second: map[string]any{}, + Expect: map[string]any{}, + }, + { + First: map[string]any{"a": 1}, + Second: map[string]any{"b": true}, + Expect: map[string]any{"a": 1, "b": true}, + }, + { + First: map[string]any{ + "a": map[string]any{ + "b": true, + }, + }, + Second: map[string]any{ + "a": map[string]any{ + "b": 2, + "c": 1, + }, + }, + Expect: map[string]any{ + "a": map[string]any{ + "b": 2, + "c": 1, + }, + }, + }, + { + First: map[string]any{ + "statefulset": map[string]any{ + "replicas": 1, + "sideCars": map[string]any{ + "image": map[string]any{ + "repository": "localhost/redpanda-operator", + "tag": "dev", + }, + }, + }, + }, + Second: map[string]any{ + "statefulset": map[string]any{ + "replicas": 5, + }, + }, + Expect: map[string]any{ + "statefulset": map[string]any{ + "replicas": 5, + "sideCars": map[string]any{ + "image": map[string]any{ + "repository": "localhost/redpanda-operator", + "tag": "dev", + }, + }, + }, + }, + }, + { + First: map[string]any{ + "statefulset": map[string]any{ + "replicas": 5, + }, + }, + Second: map[string]any{ + "statefulset": map[string]any{ + "replicas": 1, + "sideCars": map[string]any{ + "image": map[string]any{ + "repository": "localhost/redpanda-operator", + "tag": "dev", + }, + }, + }, + }, + Expect: map[string]any{ + "statefulset": map[string]any{ + "replicas": 1, + "sideCars": map[string]any{ + "image": map[string]any{ + "repository": "localhost/redpanda-operator", + "tag": "dev", + }, + }, + }, + }, + }, + } + + for _, tc := range cases { + actual := functional.MergeMaps(tc.First, tc.Second) + require.Equal(t, tc.Expect, actual) + } +} diff --git a/operator/tests/e2e-with-flags/decommission/00-assert.yaml b/operator/tests/e2e-with-flags/decommission/00-assert.yaml deleted file mode 100644 index 4c52e74ac..000000000 --- a/operator/tests/e2e-with-flags/decommission/00-assert.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -- script: | - kubectl wait --for=condition=ClusterConfigured=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE - kubectl wait --for=condition=OperatorQuiescent=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE ---- -apiVersion: v1 -kind: Pod -metadata: - name: decommission-0 -status: - phase: Running ---- -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -status: - replicas: 3 - currentReplicas: 3 - readyReplicas: 3 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: decommission -spec: - persistentVolumeClaimRetentionPolicy: - whenDeleted: Delete - whenScaled: Delete ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -collectors: - - command: ../../../hack/get-redpanda-info.sh diff --git a/operator/tests/e2e-with-flags/decommission/00-redpanda-cluster.yaml b/operator/tests/e2e-with-flags/decommission/00-redpanda-cluster.yaml deleted file mode 100644 index 
e7e51e1dd..000000000 --- a/operator/tests/e2e-with-flags/decommission/00-redpanda-cluster.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -spec: - image: "localhost/redpanda" - version: "dev" - replicas: 3 - resources: - requests: - cpu: "100m" - memory: 256Mi - limits: - cpu: "100m" - memory: 256Mi - configuration: - rpcServer: - port: 33145 - kafkaApi: - - port: 9092 - adminApi: - - port: 9644 - pandaproxyApi: - - port: 8082 - developerMode: true - additionalCommandlineArguments: - dump-memory-diagnostics-on-alloc-failure-kind: all - abort-on-seastar-bad-alloc: '' diff --git a/operator/tests/e2e-with-flags/decommission/01-assert.yaml b/operator/tests/e2e-with-flags/decommission/01-assert.yaml deleted file mode 100644 index 74aa1f116..000000000 --- a/operator/tests/e2e-with-flags/decommission/01-assert.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -- script: | - kubectl wait --for=condition=ClusterConfigured=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE - kubectl wait --for=condition=OperatorQuiescent=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE ---- -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -status: - replicas: 2 - currentReplicas: 2 - readyReplicas: 2 ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -collectors: - - command: ../../../hack/get-redpanda-info.sh diff --git a/operator/tests/e2e-with-flags/decommission/01-redpanda-downscale.yaml b/operator/tests/e2e-with-flags/decommission/01-redpanda-downscale.yaml deleted file mode 100644 index 72fc53003..000000000 --- a/operator/tests/e2e-with-flags/decommission/01-redpanda-downscale.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -spec: - replicas: 2 diff --git a/operator/tests/e2e-with-flags/decommission/02-assert.yaml b/operator/tests/e2e-with-flags/decommission/02-assert.yaml deleted file mode 100644 index 596a7294e..000000000 --- a/operator/tests/e2e-with-flags/decommission/02-assert.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - labels: - job-name: get-broker-count -status: - containerStatuses: - - name: curl - state: - terminated: - exitCode: 0 - reason: Completed - phase: Succeeded ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -collectors: - - command: ../../../hack/get-redpanda-info.sh diff --git a/operator/tests/e2e-with-flags/decommission/02-probe.yaml b/operator/tests/e2e-with-flags/decommission/02-probe.yaml deleted file mode 100644 index bedfefc7e..000000000 --- a/operator/tests/e2e-with-flags/decommission/02-probe.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: get-broker-count -spec: - backoffLimit: 10 - template: - spec: - activeDeadlineSeconds: 90 - containers: - - name: curl - image: apteno/alpine-jq:latest - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - command: - - /bin/sh - - -c - - -ex - args: - - | - url=http://decommission-0.decommission.$NAMESPACE.svc.cluster.local:9644/v1/brokers - res=$(curl --silent -L $url | jq '. 
| length') - - if [[ "$res" != "2" ]]; then - exit 1; - fi - restartPolicy: Never diff --git a/operator/tests/e2e-with-flags/decommission/03-assert.yaml b/operator/tests/e2e-with-flags/decommission/03-assert.yaml deleted file mode 100644 index 3768e3c15..000000000 --- a/operator/tests/e2e-with-flags/decommission/03-assert.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -- timeout: 300 - script: | - kubectl wait --for=delete pvc/datadir-decommission-2 --timeout 0s -n redpanda --namespace $NAMESPACE diff --git a/operator/tests/e2e-with-flags/decommission/04-assert.yaml b/operator/tests/e2e-with-flags/decommission/04-assert.yaml deleted file mode 100644 index d2b6c39d9..000000000 --- a/operator/tests/e2e-with-flags/decommission/04-assert.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -- script: | - kubectl wait --for=condition=ClusterConfigured=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE - kubectl wait --for=condition=OperatorQuiescent=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE ---- -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -status: - replicas: 3 - currentReplicas: 3 - readyReplicas: 3 ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -collectors: - - command: ../../../hack/get-redpanda-info.sh diff --git a/operator/tests/e2e-with-flags/decommission/04-redpanda-upscale.yaml b/operator/tests/e2e-with-flags/decommission/04-redpanda-upscale.yaml deleted file mode 100644 index a21317812..000000000 --- a/operator/tests/e2e-with-flags/decommission/04-redpanda-upscale.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -spec: - replicas: 3 diff --git a/operator/tests/e2e-with-flags/decommission/05-assert.yaml b/operator/tests/e2e-with-flags/decommission/05-assert.yaml deleted file mode 100644 index 3e098d576..000000000 --- a/operator/tests/e2e-with-flags/decommission/05-assert.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -- script: | - kubectl wait --for=condition=ClusterConfigured=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE - kubectl wait --for=condition=OperatorQuiescent=True cluster/up-img-admin-tls --timeout 300s --namespace $NAMESPACE ---- -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -status: - replicas: 2 - currentReplicas: 2 ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -collectors: - - command: ../../../hack/get-redpanda-info.sh diff --git a/operator/tests/e2e-with-flags/decommission/05-redpanda-downscale.yaml b/operator/tests/e2e-with-flags/decommission/05-redpanda-downscale.yaml deleted file mode 100644 index 72fc53003..000000000 --- a/operator/tests/e2e-with-flags/decommission/05-redpanda-downscale.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: redpanda.vectorized.io/v1alpha1 -kind: Cluster -metadata: - name: decommission -spec: - replicas: 2 diff --git a/operator/tests/e2e-with-flags/decommission/06-assert.yaml b/operator/tests/e2e-with-flags/decommission/06-assert.yaml deleted file mode 100644 index 217e4a34e..000000000 --- a/operator/tests/e2e-with-flags/decommission/06-assert.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - labels: - job-name: get-broker-count-again -status: - containerStatuses: - - name: curl - state: - terminated: - exitCode: 0 - reason: 
Completed
-  phase: Succeeded
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/decommission/06-probe.yaml b/operator/tests/e2e-with-flags/decommission/06-probe.yaml
deleted file mode 100644
index 5e2b2e42e..000000000
--- a/operator/tests/e2e-with-flags/decommission/06-probe.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: get-broker-count-again
-spec:
-  backoffLimit: 10
-  template:
-    spec:
-      activeDeadlineSeconds: 90
-      containers:
-      - name: curl
-        image: apteno/alpine-jq:latest
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        command:
-        - /bin/sh
-        - -c
-        - -ex
-        args:
-        - |
-          url=http://decommission-0.decommission.$NAMESPACE.svc.cluster.local:9644/v1/brokers
-          res=$(curl --silent -L $url | jq '. | length')
-
-          if [[ "$res" != "2" ]]; then
-            exit 1;
-          fi
-      restartPolicy: Never
diff --git a/operator/tests/e2e-with-flags/decommission/07-assert.yaml b/operator/tests/e2e-with-flags/decommission/07-assert.yaml
deleted file mode 100644
index 5fd0a664c..000000000
--- a/operator/tests/e2e-with-flags/decommission/07-assert.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- timeout: 300
-  script: |
-    kubectl wait --for=delete pvc/datadir-decommission-0 --timeout 0s -n redpanda --namespace $NAMESPACE
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- timeout: 300
-  script: |
-    kubectl wait --for=delete pvc/datadir-decommission-1 --timeout 0s -n redpanda --namespace $NAMESPACE
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- timeout: 300
-  script: |
-    kubectl wait --for=delete pvc/datadir-decommission-2 --timeout 0s -n redpanda --namespace $NAMESPACE
diff --git a/operator/tests/e2e-with-flags/decommission/07-clean.yaml b/operator/tests/e2e-with-flags/decommission/07-clean.yaml
deleted file mode 100644
index 7654e2b38..000000000
--- a/operator/tests/e2e-with-flags/decommission/07-clean.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-delete:
-  - apiVersion: redpanda.vectorized.io/v1alpha1
-    kind: Cluster
-    name: decommission
-    namespace: redpanda-system
-  - apiVersion: batch/v1
-    kind: Job
-    name: get-broker-count
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: Pod
-    labels:
-      job-name: get-broker-count
-    namespace: redpanda-system
-  - apiVersion: batch/v1
-    kind: Job
-    name: get-broker-count-again
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: Pod
-    labels:
-      job-name: get-broker-count-again
-    namespace: redpanda-system
-
diff --git a/operator/tests/e2e-with-flags/decommission/README.txt b/operator/tests/e2e-with-flags/decommission/README.txt
deleted file mode 100644
index 4d0d01021..000000000
--- a/operator/tests/e2e-with-flags/decommission/README.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-This test
-
-0. creates a 3 node Redpanda cluster
-1. changes the cluster spec.replicas to 2
-2. check that there are only 2 brokers registered with redpanda
\ No newline at end of file
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/00-assert.yaml b/operator/tests/e2e-with-flags/nodepools-delete/00-assert.yaml
deleted file mode 100644
index 581440cdf..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/00-assert.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=condition=ClusterConfigured=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
-    kubectl wait --for=condition=OperatorQuiescent=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
----
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepools-delete
-status:
-  readyReplicas: 3
-  replicas: 3
-  currentReplicas: 3
-  upgrading: false
-  restarting: false
-  nodePools:
-    first:
-      currentReplicas: 3
-      readyReplicas: 3
-      replicas: 3
-      restarting: false
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/00-one-nodepool.yaml b/operator/tests/e2e-with-flags/nodepools-delete/00-one-nodepool.yaml
deleted file mode 100644
index 1f3a64840..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/00-one-nodepool.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepools-delete
-spec:
-  image: "localhost/redpanda"
-  version: "dev"
-  nodePools:
-  - name: first
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "100m"
-        memory: 256Mi
-      limits:
-        cpu: "100m"
-        memory: 256Mi
-  configuration:
-    rpcServer:
-      port: 33145
-    kafkaApi:
-    - port: 9092
-    adminApi:
-    - port: 9644
-    developerMode: true
-    additionalCommandlineArguments:
-      dump-memory-diagnostics-on-alloc-failure-kind: all
-      abort-on-seastar-bad-alloc: ''
-  resources: {}
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/01-add-second-pool.yaml b/operator/tests/e2e-with-flags/nodepools-delete/01-add-second-pool.yaml
deleted file mode 100644
index 1604dbd6f..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/01-add-second-pool.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepools-delete
-spec:
-  image: "localhost/redpanda"
-  version: "dev"
-  nodePools:
-  - name: first
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "100m"
-        memory: 256Mi
-      limits:
-        cpu: "100m"
-        memory: 256Mi
-  - name: second
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "101m"
-        memory: 257Mi
-      limits:
-        cpu: "101m"
-        memory: 257Mi
-  configuration:
-    rpcServer:
-      port: 33145
-    kafkaApi:
-    - port: 9092
-    adminApi:
-    - port: 9644
-    developerMode: true
-    additionalCommandlineArguments:
-      dump-memory-diagnostics-on-alloc-failure-kind: all
-      abort-on-seastar-bad-alloc: ''
-  resources: {}
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/01-assert.yaml b/operator/tests/e2e-with-flags/nodepools-delete/01-assert.yaml
deleted file mode 100644
index cfc18d4e3..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/01-assert.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=condition=ClusterConfigured=True cluster/nodepools-delete --timeout 300s --namespace $NAMESPACE
-    kubectl wait --for=condition=OperatorQuiescent=True cluster/nodepools-delete --timeout 300s --namespace $NAMESPACE
----
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepools-delete
-status:
-  nodePools:
-    first:
-      currentReplicas: 3
-      readyReplicas: 3
-      replicas: 3
-      restarting: false
-    second:
-      currentReplicas: 3
-      readyReplicas: 3
-      replicas: 3
-      restarting: false
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/02-assert.yaml b/operator/tests/e2e-with-flags/nodepools-delete/02-assert.yaml
deleted file mode 100644
index 3b2928858..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/02-assert.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=delete pod/nodepools-delete-first-2 --timeout 300s --namespace $NAMESPACE
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=delete pod/nodepools-delete-first-1 --timeout 300s --namespace $NAMESPACE
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=delete pod/nodepools-delete-first-0 --timeout 300s --namespace $NAMESPACE
----
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepools-delete
-status:
-  readyReplicas: 3
-  replicas: 3
-  currentReplicas: 3
-  upgrading: false
-  restarting: false
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=condition=ClusterConfigured=True cluster/nodepools-delete --timeout 300s --namespace $NAMESPACE
    kubectl wait --for=condition=OperatorQuiescent=True cluster/nodepools-delete --timeout 300s --namespace $NAMESPACE
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/02-delete-first-nodepool.yaml b/operator/tests/e2e-with-flags/nodepools-delete/02-delete-first-nodepool.yaml
deleted file mode 100644
index bb052aef5..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/02-delete-first-nodepool.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Actually delete nodepool, not just scale to zero.
-# WITHOUT scaling to zero first.
-# Ensure that it goes 3->2->1-0->gone.
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepools-delete
-spec:
-  image: "localhost/redpanda"
-  version: "dev"
-  nodePools:
-  - name: second
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "101m"
-        memory: 257Mi
-      limits:
-        cpu: "101m"
-        memory: 257Mi
-  configuration:
-    rpcServer:
-      port: 33145
-    kafkaApi:
-    - port: 9092
-    adminApi:
-    - port: 9644
-    developerMode: true
-    additionalCommandlineArguments:
-      dump-memory-diagnostics-on-alloc-failure-kind: all
-      abort-on-seastar-bad-alloc: ''
-  resources: {}
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/03-assert.yaml b/operator/tests/e2e-with-flags/nodepools-delete/03-assert.yaml
deleted file mode 100644
index e2c6efb5d..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/03-assert.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools-delete/03-clean.yaml b/operator/tests/e2e-with-flags/nodepools-delete/03-clean.yaml
deleted file mode 100644
index 3a588676e..000000000
--- a/operator/tests/e2e-with-flags/nodepools-delete/03-clean.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-delete:
-  - apiVersion: redpanda.vectorized.io/v1alpha1
-    kind: Cluster
-    name: nodepools-delete
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepools-delete-0
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepools-delete-1
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepools-delete-2
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepools-delete-3
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepools-delete-4
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepools-delete-5
-    namespace: redpanda-system
diff --git a/operator/tests/e2e-with-flags/nodepools/00-assert.yaml b/operator/tests/e2e-with-flags/nodepools/00-assert.yaml
deleted file mode 100644
index 708fdeb6d..000000000
--- a/operator/tests/e2e-with-flags/nodepools/00-assert.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=condition=ClusterConfigured=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
-    kubectl wait --for=condition=OperatorQuiescent=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
----
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepool-cluster
-status:
-  version: "dev"
-  readyReplicas: 3
-  replicas: 3
-  currentReplicas: 3
-  upgrading: false
-  restarting: false
-status:
-  nodePools:
-    nodepool1:
-      currentReplicas: 3
-      readyReplicas: 3
-      replicas: 3
-      restarting: false
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools/00-one-nodepool.yaml b/operator/tests/e2e-with-flags/nodepools/00-one-nodepool.yaml
deleted file mode 100644
index 13de3658e..000000000
--- a/operator/tests/e2e-with-flags/nodepools/00-one-nodepool.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepool-cluster
-spec:
-  image: "localhost/redpanda"
-  version: "dev"
-  nodePools:
-  - name: nodepool1
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "100m"
-        memory: 256Mi
-      limits:
-        cpu: "100m"
-        memory: 256Mi
-  configuration:
-    rpcServer:
-      port: 33145
-    kafkaApi:
-    - port: 9092
-    adminApi:
-    - port: 9644
-    developerMode: true
-    additionalCommandlineArguments:
-      dump-memory-diagnostics-on-alloc-failure-kind: all
-      abort-on-seastar-bad-alloc: ''
-  resources: {}
diff --git a/operator/tests/e2e-with-flags/nodepools/01-assert.yaml b/operator/tests/e2e-with-flags/nodepools/01-assert.yaml
deleted file mode 100644
index 1df594d31..000000000
--- a/operator/tests/e2e-with-flags/nodepools/01-assert.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=condition=ClusterConfigured=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
-    kubectl wait --for=condition=OperatorQuiescent=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
----
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepool-cluster
-status:
-  version: "dev"
-  readyReplicas: 6
-  replicas: 6
-  currentReplicas: 6
-  upgrading: false
-  restarting: false
-  nodePools:
-    nodepool1:
-      currentReplicas: 3
-      readyReplicas: 3
-      replicas: 3
-      restarting: false
-    nodepool2:
-      currentReplicas: 3
-      readyReplicas: 3
-      replicas: 3
-      restarting: false
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools/01-two-nodepools.yaml b/operator/tests/e2e-with-flags/nodepools/01-two-nodepools.yaml
deleted file mode 100644
index 321c2bde8..000000000
--- a/operator/tests/e2e-with-flags/nodepools/01-two-nodepools.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepool-cluster
-spec:
-  image: "localhost/redpanda"
-  version: "dev"
-  nodePools:
-  - name: nodepool1
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "100m"
-        memory: 256Mi
-      limits:
-        cpu: "100m"
-        memory: 256Mi
-  - name: nodepool2
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "100m"
-        memory: 256Mi
-      limits:
-        cpu: "100m"
-        memory: 256Mi
-  configuration:
-    rpcServer:
-      port: 33145
-    kafkaApi:
-    - port: 9092
-    adminApi:
-    - port: 9644
-    developerMode: true
-    additionalCommandlineArguments:
-      dump-memory-diagnostics-on-alloc-failure-kind: all
-      abort-on-seastar-bad-alloc: ''
-  resources: {}
diff --git a/operator/tests/e2e-with-flags/nodepools/02-assert.yaml b/operator/tests/e2e-with-flags/nodepools/02-assert.yaml
deleted file mode 100644
index 93e666a3c..000000000
--- a/operator/tests/e2e-with-flags/nodepools/02-assert.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    kubectl wait --for=condition=ClusterConfigured=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
-    kubectl wait --for=condition=OperatorQuiescent=True cluster/nodepool-cluster --timeout 300s --namespace $NAMESPACE
----
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepool-cluster
-status:
-  version: "dev"
-  readyReplicas: 3
-  replicas: 3
-  currentReplicas: 3
-  upgrading: false
-  restarting: false
-  nodePools:
-    nodepool1:
-      currentReplicas: 0
-      readyReplicas: 0
-      replicas: 0
-      restarting: false
-    nodepool2:
-      currentReplicas: 3
-      readyReplicas: 3
-      replicas: 3
-      restarting: false
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools/02-scale-down-first.yaml b/operator/tests/e2e-with-flags/nodepools/02-scale-down-first.yaml
deleted file mode 100644
index 9ee1ad345..000000000
--- a/operator/tests/e2e-with-flags/nodepools/02-scale-down-first.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-apiVersion: redpanda.vectorized.io/v1alpha1
-kind: Cluster
-metadata:
-  name: nodepool-cluster
-spec:
-  image: "localhost/redpanda"
-  version: "dev"
-  nodePools:
-  - name: nodepool1
-    replicas: 0
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "100m"
-        memory: 256Mi
-      limits:
-        cpu: "100m"
-        memory: 256Mi
-  - name: nodepool2
-    replicas: 3
-    storage: {}
-    cloudCacheStorage: {}
-    resources:
-      requests:
-        cpu: "100m"
-        memory: 256Mi
-      limits:
-        cpu: "100m"
-        memory: 256Mi
-  configuration:
-    rpcServer:
-      port: 33145
-    kafkaApi:
-    - port: 9092
-    adminApi:
-    - port: 9644
-    developerMode: true
-    additionalCommandlineArguments:
-      dump-memory-diagnostics-on-alloc-failure-kind: all
-      abort-on-seastar-bad-alloc: ''
-  resources: {}
diff --git a/operator/tests/e2e-with-flags/nodepools/03-assert.yaml b/operator/tests/e2e-with-flags/nodepools/03-assert.yaml
deleted file mode 100644
index e2c6efb5d..000000000
--- a/operator/tests/e2e-with-flags/nodepools/03-assert.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-collectors:
-  - command: ../../../hack/get-redpanda-info.sh
diff --git a/operator/tests/e2e-with-flags/nodepools/03-clean.yaml b/operator/tests/e2e-with-flags/nodepools/03-clean.yaml
deleted file mode 100644
index 5dc1205ee..000000000
--- a/operator/tests/e2e-with-flags/nodepools/03-clean.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-delete:
-  - apiVersion: redpanda.vectorized.io/v1alpha1
-    kind: Cluster
-    name: nodepool-cluster
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepool-cluster-0
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepool-cluster-1
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepool-cluster-2
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepool-cluster-3
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepool-cluster-4
-    namespace: redpanda-system
-  - apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: datadir-nodepool-cluster-5
-    namespace: redpanda-system
\ No newline at end of file
diff --git a/pkg/k3d/k3d.go b/pkg/k3d/k3d.go
index c0c1db285..30791a7af 100644
--- a/pkg/k3d/k3d.go
+++ b/pkg/k3d/k3d.go
@@ -260,13 +260,16 @@ func (c *Cluster) RESTConfig() *kube.RESTConfig {
 func (c *Cluster) ImportImage(images ...string) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()
-	if out, err := exec.Command(
-		"k3d",
+
+	args := []string{
 		"image",
 		"import",
 		fmt.Sprintf("--cluster=%s", c.Name),
-		strings.Join(images, " "),
-	).CombinedOutput(); err != nil {
+	}
+
+	args = append(args, images...)
+
+	if out, err := exec.Command("k3d", args...).CombinedOutput(); err != nil {
 		return fmt.Errorf("%w: %s", err, out)
 	}
 	return nil
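A note on the `ImportImage` change above: `exec.Command` performs no shell word-splitting, so each Go string becomes exactly one argv entry in the child process. The old `strings.Join(images, " ")` therefore handed k3d a single argument, one bogus image name containing spaces, whenever more than one image was imported; appending `images...` yields one argument per image. Below is a minimal, illustrative sketch of the difference; it is not part of the patch, and `echo` merely stands in for the `k3d` binary:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	images := []string{"redpanda:dev", "operator:dev"}

	// Old behavior: both images arrive as one argv entry, so the child
	// process sees a single "image" literally named "redpanda:dev operator:dev".
	joined := exec.Command("echo", "image", "import", "redpanda:dev operator:dev")

	// New behavior: one argv entry per image, which is what the CLI expects.
	args := []string{"image", "import"}
	args = append(args, images...)
	split := exec.Command("echo", args...)

	fmt.Println(len(joined.Args), joined.Args) // 4 [echo image import redpanda:dev operator:dev]
	fmt.Println(len(split.Args), split.Args)   // 5 [echo image import redpanda:dev operator:dev]
}
```

Printed with `Println` the two argument lists render identically; only the count differs (argv[0] plus three arguments versus argv[0] plus four), and that count is exactly what the child process's flag parsing sees.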
diff --git a/taskfiles/ci.yml b/taskfiles/ci.yml
index b5477106e..e4bd748f8 100644
--- a/taskfiles/ci.yml
+++ b/taskfiles/ci.yml
@@ -52,13 +52,6 @@ tasks:
     - 'echo "~~~ Running kuttl V1 tests :k8s:"'
     - task: run-kuttl-tests
 
-  test:kuttl-v1-nodepools:
-    cmds:
-      - 'echo "~~~ Running kuttl V1 Nodepools tests :k8s:"'
-      - task: run-kuttl-tests
-        vars:
-          KUTTL_CONFIG_FILE: kuttl-test-with-flags.yaml
-
   test:kuttl-v2:
     cmds:
       - 'echo "~~~ Running kuttl V2 tests :k8s:"'