24 changes: 0 additions & 24 deletions acceptance/clusters/roles/cluster.yaml

This file was deleted.

27 changes: 27 additions & 0 deletions acceptance/clusters/vectorized/sasl/cluster.yaml
@@ -0,0 +1,27 @@
apiVersion: redpanda.vectorized.io/v1alpha1
kind: Cluster
metadata:
  name: sasl
spec:
  image: "redpandadata/redpanda"
  version: "v25.2.1"
  replicas: 1
  enableSasl: true
  resources:
    requests:
      cpu: "100m"
      memory: 256Mi
    limits:
      cpu: "100m"
      memory: 256Mi
  configuration:
    rpcServer:
      port: 33145
    kafkaApi:
      - port: 9092
    adminApi:
      - port: 9644
    developerMode: true
    additionalCommandlineArguments:
      dump-memory-diagnostics-on-alloc-failure-kind: all
      abort-on-seastar-bad-alloc: ''
42 changes: 21 additions & 21 deletions acceptance/features/role-crds.feature
@@ -1,19 +1,19 @@
@cluster:roles
@cluster:sasl
Feature: Role CRDs
Background: Cluster available
Given cluster "roles" is available
Given cluster "sasl" is available

@skip:gke @skip:aks @skip:eks
Scenario: Manage roles
Given there is no role "admin-role" in cluster "roles"
And there are the following pre-existing users in cluster "roles"
Given there is no role "admin-role" in cluster "sasl"
And there are the following pre-existing users in cluster "sasl"
| name | password | mechanism |
| alice | password | SCRAM-SHA-256 |
| bob | password | SCRAM-SHA-256 |
When I apply Kubernetes manifest:
"""
# tag::manage-roles-with-principals[]
# In this example manifest, a role called "admin-role" is created in a cluster called "roles".
# In this example manifest, a role called "admin-role" is created in a cluster called "sasl".
# The role includes two principals (alice and bob) who will inherit the role's permissions.
---
apiVersion: cluster.redpanda.com/v1alpha2
@@ -23,27 +23,27 @@ Feature: Role CRDs
spec:
cluster:
clusterRef:
name: roles
name: sasl
principals:
- User:alice
- User:bob
# end::manage-roles-with-principals[]
"""
And role "admin-role" is successfully synced
Then role "admin-role" should exist in cluster "roles"
And role "admin-role" should have members "alice and bob" in cluster "roles"
Then role "admin-role" should exist in cluster "sasl"
And role "admin-role" should have members "alice and bob" in cluster "sasl"

@skip:gke @skip:aks @skip:eks
Scenario: Manage roles with authorization
Given there is no role "read-only-role" in cluster "roles"
And there are the following pre-existing users in cluster "roles"
Given there is no role "read-only-role" in cluster "sasl"
And there are the following pre-existing users in cluster "sasl"
| name | password | mechanism |
| charlie | password | SCRAM-SHA-256 |
When I create topic "public-test" in cluster "roles"
When I create topic "public-test" in cluster "sasl"
And I apply Kubernetes manifest:
"""
# tag::manage-roles-with-authorization[]
# In this example manifest, a role called "read-only-role" is created in a cluster called "roles".
# In this example manifest, a role called "read-only-role" is created in a cluster called "sasl".
# The role includes authorization rules that allow reading from topics with names starting with "public-".
---
apiVersion: cluster.redpanda.com/v1alpha2
@@ -53,7 +53,7 @@ Feature: Role CRDs
spec:
cluster:
clusterRef:
name: roles
name: sasl
principals:
- User:charlie
authorization:
@@ -67,16 +67,16 @@ Feature: Role CRDs
# end::manage-roles-with-authorization[]
"""
And role "read-only-role" is successfully synced
Then role "read-only-role" should exist in cluster "roles"
And role "read-only-role" should have ACLs for topic pattern "public-" in cluster "roles"
And "charlie" should be able to read from topic "public-test" in cluster "roles"
Then role "read-only-role" should exist in cluster "sasl"
And role "read-only-role" should have ACLs for topic pattern "public-" in cluster "sasl"
And "charlie" should be able to read from topic "public-test" in cluster "sasl"

@skip:gke @skip:aks @skip:eks
Scenario: Manage authorization-only roles
Given there are the following pre-existing users in cluster "roles"
Given there are the following pre-existing users in cluster "sasl"
| name | password | mechanism |
| travis | password | SCRAM-SHA-256 |
And there is a pre-existing role "travis-role" in cluster "roles"
And there is a pre-existing role "travis-role" in cluster "sasl"
When I apply Kubernetes manifest:
"""
# tag::manage-authz-only-roles[]
@@ -91,7 +91,7 @@ Feature: Role CRDs
spec:
cluster:
clusterRef:
name: roles
name: sasl
principals:
- User:travis
authorization:
@@ -106,5 +106,5 @@ Feature: Role CRDs
"""
And role "travis-role" is successfully synced
And I delete the CRD role "travis-role"
Then there should still be role "travis-role" in cluster "roles"
And there should be no ACLs for role "travis-role" in cluster "roles"
Then there should still be role "travis-role" in cluster "sasl"
And there should be no ACLs for role "travis-role" in cluster "sasl"
103 changes: 103 additions & 0 deletions acceptance/features/vectorized-role-crds.feature
@@ -0,0 +1,103 @@
@cluster:vectorized/sasl
Feature: Vectorized Role CRDs
Background: Cluster available
Given vectorized cluster "sasl" is available

@skip:gke @skip:aks @skip:eks
Scenario: Manage vectorized roles
Given there is no role "admin-role" in vectorized cluster "sasl"
And there are the following pre-existing users in vectorized cluster "sasl"
| name | password | mechanism |
| alice | password | SCRAM-SHA-256 |
| bob | password | SCRAM-SHA-256 |
When I apply Kubernetes manifest:
"""
---
apiVersion: cluster.redpanda.com/v1alpha2
kind: Role
metadata:
name: admin-role
spec:
cluster:
clusterRef:
group: redpanda.vectorized.io
kind: Cluster
name: sasl
principals:
- User:alice
- User:bob
"""
And role "admin-role" is successfully synced
Then role "admin-role" should exist in vectorized cluster "sasl"
And role "admin-role" should have members "alice and bob" in vectorized cluster "sasl"

@skip:gke @skip:aks @skip:eks
Scenario: Manage vectorized roles with authorization
Given there is no role "read-only-role" in vectorized cluster "sasl"
And there are the following pre-existing users in vectorized cluster "sasl"
| name | password | mechanism |
| charlie | password | SCRAM-SHA-256 |
When I create topic "public-test" in vectorized cluster "sasl"
Contributor:

Could this step be replaced with a Topic custom resource? I understand each test should exercise isolated features/changes as much as possible, but I think the acceptance test suite can combine more custom resources under test.
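A minimal sketch of what that suggestion could look like, assuming the Topic resource in cluster.redpanda.com/v1alpha2 accepts the same cluster.clusterRef source as the Role manifests in this PR (if it does not, the Topic's own connection settings would be used instead); the partition and replication values are illustrative:

---
apiVersion: cluster.redpanda.com/v1alpha2
kind: Topic
metadata:
  name: public-test
spec:
  # Assumption: Topic can reference the vectorized Cluster via clusterRef,
  # mirroring the Role manifests in this PR.
  cluster:
    clusterRef:
      group: redpanda.vectorized.io
      kind: Cluster
      name: sasl
  partitions: 1
  replicationFactor: 1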

And I apply Kubernetes manifest:
"""
---
apiVersion: cluster.redpanda.com/v1alpha2
kind: Role
metadata:
name: read-only-role
spec:
cluster:
clusterRef:
group: redpanda.vectorized.io
kind: Cluster
name: sasl
principals:
- User:charlie
authorization:
acls:
- type: allow
resource:
type: topic
name: public-
patternType: prefixed
operations: [Read, Describe]
"""
And role "read-only-role" is successfully synced
Then role "read-only-role" should exist in vectorized cluster "sasl"
And role "read-only-role" should have ACLs for topic pattern "public-" in vectorized cluster "sasl"
And "charlie" should be able to read from topic "public-test" in vectorized cluster "sasl"

@skip:gke @skip:aks @skip:eks
Scenario: Manage vectorized authorization-only roles
Given there are the following pre-existing users in vectorized cluster "sasl"
| name | password | mechanism |
| travis | password | SCRAM-SHA-256 |
And there is a pre-existing role "travis-role" in vectorized cluster "sasl"
When I apply Kubernetes manifest:
"""
---
apiVersion: cluster.redpanda.com/v1alpha2
kind: Role
metadata:
name: travis-role
spec:
cluster:
clusterRef:
group: redpanda.vectorized.io
kind: Cluster
name: sasl
principals:
- User:travis
authorization:
acls:
- type: allow
resource:
type: topic
name: some-topic
patternType: prefixed
operations: [Read]
"""
And role "travis-role" is successfully synced
And I delete the CRD role "travis-role"
Then there should still be role "travis-role" in vectorized cluster "sasl"
And there should be no ACLs for role "travis-role" in vectorized cluster "sasl"
10 changes: 9 additions & 1 deletion acceptance/main_test.go
@@ -25,6 +25,7 @@ import (
"github.com/redpanda-data/redpanda-operator/harpoon/providers"
redpandav1alpha1 "github.com/redpanda-data/redpanda-operator/operator/api/redpanda/v1alpha1"
redpandav1alpha2 "github.com/redpanda-data/redpanda-operator/operator/api/redpanda/v1alpha2"
vectorizedv1alpha1 "github.com/redpanda-data/redpanda-operator/operator/api/vectorized/v1alpha1"
operatorchart "github.com/redpanda-data/redpanda-operator/operator/chart"
"github.com/redpanda-data/redpanda-operator/pkg/helm"
"github.com/redpanda-data/redpanda-operator/pkg/otelutil"
@@ -60,7 +61,7 @@ var setupSuite = sync.OnceValues(func() (*framework.Suite, error) {
"quay.io/jetstack/cert-manager-startupapicheck:v1.14.2",
"quay.io/jetstack/cert-manager-webhook:v1.14.2",
}...).
WithSchemeFunctions(redpandav1alpha1.Install, redpandav1alpha2.Install).
WithSchemeFunctions(vectorizedv1alpha1.Install, redpandav1alpha1.Install, redpandav1alpha2.Install).
WithHelmChart("https://charts.jetstack.io", "jetstack", "cert-manager", helm.InstallOptions{
Name: "cert-manager",
Namespace: "cert-manager",
@@ -88,9 +89,16 @@ var setupSuite = sync.OnceValues(func() (*framework.Suite, error) {
Repository: ptr.To(imageRepo),
},
CRDs: &operatorchart.PartialCRDs{
Enabled: ptr.To(true),
Experimental: ptr.To(true),
},
VectorizedControllers: &operatorchart.PartialVectorizedControllers{
Enabled: ptr.To(true),
},
AdditionalCmdFlags: []string{
// For the v1 controllers since otherwise we'll attempt to always
// pull the locally built operator which will result in errors
"--configurator-image-pull-policy=IfNotPresent",
// These are needed for running decommissioning tests.
"--additional-controllers=nodeWatcher,decommission",
"--unbind-pvcs-after=5s",
46 changes: 45 additions & 1 deletion acceptance/steps/cluster.go
@@ -13,6 +13,7 @@ import (
"context"
"fmt"
"math/rand/v2"
"strings"
"time"

"github.com/redpanda-data/common-go/rpadmin"
@@ -24,9 +25,52 @@ import (

framework "github.com/redpanda-data/redpanda-operator/harpoon"
redpandav1alpha2 "github.com/redpanda-data/redpanda-operator/operator/api/redpanda/v1alpha2"
vectorizedv1alpha1 "github.com/redpanda-data/redpanda-operator/operator/api/vectorized/v1alpha1"
)

func checkClusterAvailability(ctx context.Context, t framework.TestingT, clusterName string) {
func checkClusterAvailability(ctx context.Context, t framework.TestingT, version, clusterName string) {
version = strings.TrimSpace(version)

if version == "vectorized" {
checkV1ClusterAvailability(ctx, t, clusterName)
return
}
checkV2ClusterAvailability(ctx, t, clusterName)
}

func checkV1ClusterAvailability(ctx context.Context, t framework.TestingT, clusterName string) {
var cluster vectorizedv1alpha1.Cluster

key := t.ResourceKey(clusterName)

t.Logf("Checking cluster %q is ready", clusterName)
require.Eventually(t, func() bool {
require.NoError(t, t.Get(ctx, key, &cluster))
hasConditionQuiescent := hasV1Condition(vectorizedv1alpha1.ClusterCondition{
Type: vectorizedv1alpha1.OperatorQuiescentConditionType,
Status: corev1.ConditionTrue,
}, cluster.Status.Conditions)

hasCondition := hasConditionQuiescent

t.Logf(`Checking cluster resource conditions contains "OperatorQuiescent"? %v`, hasCondition)
return hasCondition
}, 5*time.Minute, 5*time.Second, "%s", delayLog(func() string {
return fmt.Sprintf(`Cluster %q never contained the condition reason "OperatorQuiescent", final Conditions: %+v`, key.String(), cluster.Status.Conditions)
}))
t.Logf("Cluster %q is ready!", clusterName)
}

func hasV1Condition(expected vectorizedv1alpha1.ClusterCondition, conditions []vectorizedv1alpha1.ClusterCondition) bool {
for _, condition := range conditions {
if expected.Type == condition.Type && expected.Status == condition.Status {
return true
}
}
return false
}

func checkV2ClusterAvailability(ctx context.Context, t framework.TestingT, clusterName string) {
var cluster redpandav1alpha2.Redpanda

key := t.ResourceKey(clusterName)