
Commit e3e338c

charts/redpanda: use new sidecar --selector flag
This commit builds upon a previous commit that added the `--selector` flag to the sidecar. It updates the chart to appropriately set this new flag and adds acceptance tests to assert that the functionality works as expected. This commit is kept separate because the v2.x.x branches need to release the updated sidecar before it can be used.
1 parent 4ed0d6f commit e3e338c
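The `--selector` flag presumably takes a standard Kubernetes label selector. A minimal, hypothetical sketch of what the rendered sidecar arguments might look like; the container name and label keys below are illustrative assumptions, not taken from this commit:

```yaml
# Hypothetical rendered StatefulSet fragment (container name and labels are examples only).
containers:
  - name: sidecars
    args:
      - --selector=app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=redpanda
```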

24 files changed: 254 additions & 292 deletions

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+project: charts/redpanda
+kind: Deprecated
+body: '- `statefulset.sideCars.controllers.createRBAC` is deprecated and no longer respected. In most cases, setting this field to `false` would result in a broken deployment. RBAC may be controlled via `rbac.enabled` or per controller via `statefulset.sideCars.controllers.{pvcUnbinder,brokerDecommissioner}.enabled`.'
+time: 2025-10-21T14:38:34.206376-04:00
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+project: charts/redpanda
+kind: Deprecated
+body: '`statefulset.sideCars.controllers.run` has been unused for many releases and is now deprecated. Individual controllers may be enabled/disabled by setting their enabled field: `statefulset.sideCars.pvcUnbinder.enabled`, `statefulset.sideCars.brokerDecommissioner.enabled`.'
+time: 2025-10-21T14:44:13.331483-04:00
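Read together, the two deprecation entries above replace the old `controllers.run`/`controllers.createRBAC` knobs with per-controller toggles. A minimal values sketch assuming the field paths quoted in the notes and in the new acceptance test below; the values shown are illustrative, not part of this commit:

```yaml
rbac:
  enabled: true       # replaces statefulset.sideCars.controllers.createRBAC
statefulset:
  sideCars:
    pvcUnbinder:
      enabled: true   # per-controller toggle, replaces statefulset.sideCars.controllers.run
    brokerDecommissioner:
      enabled: true
```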
Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
+@operator:none
+Feature: Redpanda Helm Chart
+
+  Scenario: Tolerating Node Failure
+    Given I helm install "redpanda" "../charts/redpanda/chart" with values:
+      ```yaml
+      nameOverride: foobar
+      fullnameOverride: bazquux
+
+      statefulset:
+        sideCars:
+          pvcUnbinder:
+            enabled: true
+            unbindAfter: 15s
+          brokerDecommissioner:
+            enabled: true
+            decommissionAfter: 15s
+      ```
+    When I stop the Node running Pod "bazquux-2"
+    And Pod "bazquux-2" is eventually Pending
+    Then Pod "bazquux-2" will eventually be Running
+    And kubectl exec -it "bazquux-0" "rpk redpanda admin brokers list | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+      ```
+      ID MEMBERSHIP
+      0 active
+      1 active
+      3 active
+      ```
+    And kubectl exec -it "bazquux-0" "rpk redpanda admin brokers list --include-decommissioned | sed -E 's/\s+/ /gm' | cut -d ' ' -f 1,6" will eventually output:
+      ```
+      ID MEMBERSHIP
+      0 active
+      1 active
+      3 active
+      2 -
+      ```

acceptance/features/migration.feature

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ Feature: Helm chart to Redpanda Operator migration
 
   @skip:gke @skip:aks @skip:eks
   Scenario: Migrate from a Helm chart release to a Redpanda custom resource
-    Given a Helm release named "redpanda-migration-example" of the "redpanda/redpanda" helm chart with the values:
+    Given I helm install "redpanda-migration-example" "../charts/redpanda/chart" with values:
      """
      # tag::helm-values[]
      fullnameOverride: name-override

acceptance/features/operator-upgrades.feature

Lines changed: 4 additions & 6 deletions
@@ -3,10 +3,8 @@ Feature: Upgrading the operator
   @skip:gke @skip:aks @skip:eks
   Scenario: Operator upgrade from 2.4.5
     Given I install local CRDs from "../operator/config/crd/bases"
-    And I install redpanda helm chart version "v2.4.5" with the values:
+    And I helm install "redpanda-operator" "redpanda/operator" --version v2.4.5 with values:
      """
-      console:
-        enabled: false
      """
     And I apply Kubernetes manifest:
      """
@@ -29,7 +27,7 @@ Feature: Upgrading the operator
     # use just a Ready status check here since that's all the
     # old operator supports
     And cluster "operator-upgrade" is available
-    Then I can upgrade to the latest operator with the values:
+    Then I can helm upgrade "redpanda-operator" "../operator/chart" with values:
      """
      image:
        tag: dev
@@ -42,7 +40,7 @@ Feature: Upgrading the operator
 
   @skip:gke @skip:aks @skip:eks
   Scenario: Operator upgrade from 25.1.3
-    And I install redpanda helm chart version "v25.1.3" with the values:
+    Given I helm install "redpanda-operator" "redpanda/operator" --version v25.1.3 with values:
      """
      crds:
        enabled: true
@@ -68,7 +66,7 @@ Feature: Upgrading the operator
     # use just a Ready status check here since that's all the
     # old operator supports
     And cluster "operator-upgrade" is available
-    Then I can upgrade to the latest operator with the values:
+    Then I can helm upgrade "redpanda-operator" "../operator/chart" with values:
      """
      image:
        tag: dev

acceptance/main_test.go

Lines changed: 2 additions & 2 deletions
@@ -93,7 +93,7 @@ var setupSuite = sync.OnceValues(func() (*framework.Suite, error) {
 		return
 	}
 	t.Log("Installing default Redpanda operator chart")
-	t.InstallLocalHelmChart(ctx, "../operator/chart", helm.InstallOptions{
+	t.InstallHelmChart(ctx, "../operator/chart", helm.InstallOptions{
 		Name:      "redpanda-operator",
 		Namespace: namespace,
 		Values: operatorchart.PartialValues{
@@ -205,7 +205,7 @@ func OperatorTag(ctx context.Context, t framework.TestingT, args ...string) cont
 	}
 
 	t.Logf("Installing Redpanda operator chart: %q", name)
-	t.InstallLocalHelmChart(ctx, "../operator/chart", helm.InstallOptions{
+	t.InstallHelmChart(ctx, "../operator/chart", helm.InstallOptions{
 		Name:       "redpanda-operator",
 		Namespace:  t.Namespace(),
 		ValuesFile: filepath.Join("operator", fmt.Sprintf("%s.yaml", name)),

acceptance/steps/cluster.go

Lines changed: 19 additions & 0 deletions
@@ -166,6 +166,25 @@ func shutdownRandomClusterNode(ctx context.Context, t framework.TestingT, cluste
 	t.ShutdownNode(ctx, pod.Spec.NodeName)
 }
 
+func shutdownNodeOfPod(ctx context.Context, t framework.TestingT, podName string) {
+	t.ResourceKey(podName)
+
+	var pod corev1.Pod
+	require.NoError(t, t.Get(ctx, t.ResourceKey(podName), &pod))
+
+	var node corev1.Node
+	require.NoError(t, t.Get(ctx, t.ResourceKey(pod.Spec.NodeName), &node))
+
+	node.Spec.Taints = append(node.Spec.Taints, corev1.Taint{
+		Key:    "node.kubernetes.io/out-of-service",
+		Effect: corev1.TaintEffectNoExecute,
+	})
+
+	require.NoError(t, t.Update(ctx, &node))
+
+	t.ShutdownNode(ctx, pod.Spec.NodeName)
+}
+
 func deleteNotReadyKubernetesNodes(ctx context.Context, t framework.TestingT) {
 	var nodes corev1.NodeList
 	require.NoError(t, t.List(ctx, &nodes))
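For reference, the `node.kubernetes.io/out-of-service` taint that `shutdownNodeOfPod` applies corresponds to this Node spec fragment; a sketch derived from the Go code above, not part of the diff itself:

```yaml
# Fragment of the patched Node object after shutdownNodeOfPod runs.
spec:
  taints:
    - key: node.kubernetes.io/out-of-service
      effect: NoExecute
```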

acceptance/steps/helm.go

Lines changed: 20 additions & 15 deletions
@@ -12,40 +12,45 @@ package steps
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/cucumber/godog"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/rest"
 	"sigs.k8s.io/yaml"
 
 	framework "github.com/redpanda-data/redpanda-operator/harpoon"
 	"github.com/redpanda-data/redpanda-operator/pkg/helm"
 )
 
-// The unused parameter is meant to specify a Helm chart place (remote or local in the file system).
-func iInstallHelmRelease(ctx context.Context, t framework.TestingT, helmReleaseName, _ string, values *godog.DocString) {
+func iHelmInstall(ctx context.Context, t framework.TestingT, name, chart, version string, values *godog.DocString) {
+	// We don't really reference anything other than the redpanda repo, so just
+	// handle repos as a naive check here.
+	if strings.HasPrefix(chart, "redpanda/") {
+		t.AddHelmRepo(ctx, "redpanda", "https://charts.redpanda.com")
+	}
+
 	var valuesMap map[string]any
 	require.NoError(t, yaml.Unmarshal([]byte(values.Content), &valuesMap))
 
-	helmClient, err := helm.New(helm.Options{
-		KubeConfig: rest.CopyConfig(t.RestConfig()),
+	t.InstallHelmChart(ctx, chart, helm.InstallOptions{
+		Name:      name,
+		Version:   version,
+		Values:    valuesMap,
+		Namespace: t.Namespace(),
 	})
-	require.NoError(t, err)
-
-	require.NoError(t, helmClient.RepoAdd(ctx, "console", "https://charts.redpanda.com"))
+}
 
-	path := "../charts/redpanda/chart"
-	require.NoError(t, helmClient.DependencyBuild(ctx, path))
+func iHelmUpgrade(ctx context.Context, t framework.TestingT, name, chart, version string, values *godog.DocString) {
+	var valuesMap map[string]any
+	require.NoError(t, yaml.Unmarshal([]byte(values.Content), &valuesMap))
 
-	t.Logf("installing chart %q", path)
-	_, err = helmClient.Install(ctx, path, helm.InstallOptions{
-		Name:      helmReleaseName,
-		Namespace: t.Namespace(),
+	t.UpgradeHelmChart(ctx, name, chart, helm.UpgradeOptions{
+		Version:   version,
 		Values:    valuesMap,
+		Namespace: t.Namespace(),
 	})
-	require.NoError(t, err)
 }
 
 func iDeleteHelmReleaseSecret(ctx context.Context, t framework.TestingT, helmReleaseName string) {

acceptance/steps/helpers.go

Lines changed: 0 additions & 16 deletions
@@ -32,9 +32,7 @@ import (
 	authenticationv1 "k8s.io/api/authentication/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/utils/ptr"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -651,20 +649,6 @@ func clientsForOperator(ctx context.Context, includeTLS bool, serviceAccountName
 	}
 }
 
-func removeAllFinalizers(ctx context.Context, t framework.TestingT, gvk schema.GroupVersionKind) {
-	list := &unstructured.UnstructuredList{}
-	list.SetGroupVersionKind(gvk)
-
-	// swallow errors for non-existent crds
-	if err := t.List(ctx, list); err == nil {
-		for i := range list.Items {
-			item := list.Items[i]
-			item.SetFinalizers(nil)
-			require.NoError(t, t.Update(ctx, &item))
-		}
-	}
-}
-
 func getVersion(t framework.TestingT, version string) string {
 	version = strings.TrimSpace(version)
 	if version != "" {

acceptance/steps/k8s.go

Lines changed: 45 additions & 7 deletions
@@ -36,6 +36,15 @@ import (
 // for type assertions
 var _ client.Object = (client.Object)(nil)
 
+func podWillEventuallyBeInPhase(ctx context.Context, t framework.TestingT, podName string, phase string) {
+	require.EventuallyWithT(t, func(c *assert.CollectT) {
+		var pod corev1.Pod
+		require.NoError(c, t.Get(ctx, t.ResourceKey(podName), &pod))
+
+		require.Equal(c, corev1.PodPhase(phase), pod.Status.Phase)
+	}, 5*time.Minute, 5*time.Second)
+}
+
 func kubernetesObjectHasClusterOwner(ctx context.Context, t framework.TestingT, groupVersionKind, resourceName, clusterName string) {
 	var cluster redpandav1alpha2.Redpanda
 
@@ -163,7 +172,23 @@ func execJSONPath(ctx context.Context, t framework.TestingT, jsonPath, groupVers
 	return nil
 }
 
-func iExecInPodMatching(
+func execInPodEventuallyMatches(
+	ctx context.Context,
+	t framework.TestingT,
+	podName string,
+	cmd string,
+	expected *godog.DocString,
+) {
+	ctl, err := kube.FromRESTConfig(t.RestConfig())
+	require.NoError(t, err)
+
+	pod, err := kube.Get[corev1.Pod](ctx, ctl, kube.ObjectKey{Namespace: t.Namespace(), Name: podName})
+	require.NoErrorf(t, err, "Pod with name %q not found", podName)
+
+	execInPod(t, ctx, ctl, pod, cmd, expected)
+}
+
+func execInPodMatchingEventuallyMatches(
 	ctx context.Context,
 	t framework.TestingT,
 	cmd,
@@ -181,11 +206,24 @@ func iExecInPodMatching(
 
 	require.True(t, len(pods.Items) > 0, "selector %q found no Pods", selector.String())
 
-	var stdout bytes.Buffer
-	require.NoError(t, ctl.Exec(ctx, &pods.Items[0], kube.ExecOptions{
-		Command: []string{"sh", "-c", cmd},
-		Stdout:  &stdout,
-	}))
+	execInPod(t, ctx, ctl, &pods.Items[0], cmd, expected)
+}
 
-	assert.Equal(t, strings.TrimSpace(expected.Content), strings.TrimSpace(stdout.String()))
+func execInPod(
+	t framework.TestingT,
+	ctx context.Context,
+	ctl *kube.Ctl,
+	pod *corev1.Pod,
+	cmd string,
+	expected *godog.DocString,
+) {
+	require.EventuallyWithT(t, func(collect *assert.CollectT) {
+		var stdout bytes.Buffer
+		require.NoError(collect, ctl.Exec(ctx, pod, kube.ExecOptions{
+			Command: []string{"sh", "-c", cmd},
+			Stdout:  &stdout,
+		}))
+
+		assert.Equal(collect, strings.TrimSpace(expected.Content), strings.TrimSpace(stdout.String()))
+	}, 5*time.Minute, 5*time.Second)
 }
