diff --git a/acceptance/clusters/vectorized/basic/cluster.yaml b/acceptance/clusters/vectorized/basic/cluster.yaml
new file mode 100644
index 000000000..e93100abf
--- /dev/null
+++ b/acceptance/clusters/vectorized/basic/cluster.yaml
@@ -0,0 +1,28 @@
+apiVersion: redpanda.vectorized.io/v1alpha1
+kind: Cluster
+metadata:
+  name: basic
+spec:
+  image: "redpandadata/redpanda"
+  version: "v25.2.1"
+  replicas: 1
+  resources:
+    requests:
+      cpu: "100m"
+      memory: 256Mi
+    limits:
+      cpu: "100m"
+      memory: 256Mi
+  configuration:
+    rpcServer:
+      port: 33145
+    kafkaApi:
+    - port: 9092
+    adminApi:
+    - port: 9644
+    schemaRegistryApi:
+    - port: 8081
+    developerMode: true
+  additionalCommandlineArguments:
+    dump-memory-diagnostics-on-alloc-failure-kind: all
+    abort-on-seastar-bad-alloc: ''
diff --git a/acceptance/clusters/vectorized/sasl/cluster.yaml b/acceptance/clusters/vectorized/sasl/cluster.yaml
index ed0b8613a..d94e8f193 100644
--- a/acceptance/clusters/vectorized/sasl/cluster.yaml
+++ b/acceptance/clusters/vectorized/sasl/cluster.yaml
@@ -21,6 +21,8 @@ spec:
     - port: 9092
     adminApi:
     - port: 9644
+    schemaRegistryApi:
+    - port: 8081
     developerMode: true
   additionalCommandlineArguments:
     dump-memory-diagnostics-on-alloc-failure-kind: all
diff --git a/acceptance/features/role-crds.feature b/acceptance/features/role-crds.feature
index 90397ec20..20db87c6d 100644
--- a/acceptance/features/role-crds.feature
+++ b/acceptance/features/role-crds.feature
@@ -1,4 +1,4 @@
-@cluster:sasl
+@cluster:sasl @variant:vectorized
 Feature: Role CRDs
   Background: Cluster available
     Given cluster "sasl" is available
diff --git a/acceptance/features/schema-crds.feature b/acceptance/features/schema-crds.feature
index 0ffe46a1e..3706aec03 100644
--- a/acceptance/features/schema-crds.feature
+++ b/acceptance/features/schema-crds.feature
@@ -1,4 +1,4 @@
-@cluster:basic
+@cluster:basic @variant:vectorized
 Feature: Schema CRDs
   Background: Cluster available
     Given cluster "basic" is available
diff --git a/acceptance/features/topic-crds.feature b/acceptance/features/topic-crds.feature
index 037d40058..a84c10112 100644
--- a/acceptance/features/topic-crds.feature
+++ b/acceptance/features/topic-crds.feature
@@ -1,4 +1,4 @@
-@cluster:basic
+@cluster:basic @variant:vectorized
 Feature: Topic CRDs
   Background: Cluster available
     Given cluster "basic" is available
diff --git a/acceptance/features/user-crds.feature b/acceptance/features/user-crds.feature
index 5dbeda4a3..ada347826 100644
--- a/acceptance/features/user-crds.feature
+++ b/acceptance/features/user-crds.feature
@@ -1,4 +1,4 @@
-@cluster:sasl
+@cluster:sasl @variant:vectorized
 Feature: User CRDs
   Background: Cluster available
     Given cluster "sasl" is available
diff --git a/acceptance/features/vectorized-role-crds.feature b/acceptance/features/vectorized-role-crds.feature
deleted file mode 100644
index c339953e7..000000000
--- a/acceptance/features/vectorized-role-crds.feature
+++ /dev/null
@@ -1,103 +0,0 @@
-@cluster:vectorized/sasl
-Feature: Vectorized Role CRDs
-  Background: Cluster available
-    Given vectorized cluster "sasl" is available
-
-  @skip:gke @skip:aks @skip:eks
-  Scenario: Manage vectorized roles
-    Given there is no role "admin-role" in vectorized cluster "sasl"
-    And there are the following pre-existing users in vectorized cluster "sasl"
-      | name | password | mechanism |
-      | alice | password | SCRAM-SHA-256 |
-      | bob | password | SCRAM-SHA-256 |
-    When I apply Kubernetes manifest:
-    """
-    ---
-    apiVersion: cluster.redpanda.com/v1alpha2
-    kind: Role
-    metadata:
-      name: admin-role
-    spec:
-      cluster:
-        clusterRef:
-          group: redpanda.vectorized.io
-          kind: Cluster
-          name: sasl
-      principals:
-      - User:alice
-      - User:bob
-    """
-    And role "admin-role" is successfully synced
-    Then role "admin-role" should exist in vectorized cluster "sasl"
-    And role "admin-role" should have members "alice and bob" in vectorized cluster "sasl"
-
-  @skip:gke @skip:aks @skip:eks
-  Scenario: Manage vectorized roles with authorization
-    Given there is no role "read-only-role" in vectorized cluster "sasl"
-    And there are the following pre-existing users in vectorized cluster "sasl"
-      | name | password | mechanism |
-      | charlie | password | SCRAM-SHA-256 |
-    When I create topic "public-test" in vectorized cluster "sasl"
-    And I apply Kubernetes manifest:
-    """
-    ---
-    apiVersion: cluster.redpanda.com/v1alpha2
-    kind: Role
-    metadata:
-      name: read-only-role
-    spec:
-      cluster:
-        clusterRef:
-          group: redpanda.vectorized.io
-          kind: Cluster
-          name: sasl
-      principals:
-      - User:charlie
-      authorization:
-        acls:
-        - type: allow
-          resource:
-            type: topic
-            name: public-
-            patternType: prefixed
-          operations: [Read, Describe]
-    """
-    And role "read-only-role" is successfully synced
-    Then role "read-only-role" should exist in vectorized cluster "sasl"
-    And role "read-only-role" should have ACLs for topic pattern "public-" in vectorized cluster "sasl"
-    And "charlie" should be able to read from topic "public-test" in vectorized cluster "sasl"
-
-  @skip:gke @skip:aks @skip:eks
-  Scenario: Manage vectorized authorization-only roles
-    Given there are the following pre-existing users in vectorized cluster "sasl"
-      | name | password | mechanism |
-      | travis | password | SCRAM-SHA-256 |
-    And there is a pre-existing role "travis-role" in vectorized cluster "sasl"
-    When I apply Kubernetes manifest:
-    """
-    ---
-    apiVersion: cluster.redpanda.com/v1alpha2
-    kind: Role
-    metadata:
-      name: travis-role
-    spec:
-      cluster:
-        clusterRef:
-          group: redpanda.vectorized.io
-          kind: Cluster
-          name: sasl
-      principals:
-      - User:travis
-      authorization:
-        acls:
-        - type: allow
-          resource:
-            type: topic
-            name: some-topic
-            patternType: prefixed
-          operations: [Read]
-    """
-    And role "travis-role" is successfully synced
-    And I delete the CRD role "travis-role"
-    Then there should still be role "travis-role" in vectorized cluster "sasl"
-    And there should be no ACLs for role "travis-role" in vectorized cluster "sasl"
\ No newline at end of file
diff --git a/acceptance/main_test.go b/acceptance/main_test.go
index 25ff0f777..cd8aa0be4 100644
--- a/acceptance/main_test.go
+++ b/acceptance/main_test.go
@@ -137,6 +137,10 @@ func ClusterTag(ctx context.Context, t framework.TestingT, args ...string) conte
 	require.Greater(t, len(args), 0, "clusters tags can only be used with additional arguments")
 	name := args[0]
 
+	if variant := t.Variant(); variant != "" {
+		name = filepath.Join(variant, name)
+	}
+
 	t.Logf("Installing cluster %q", name)
 	t.ApplyManifest(ctx, filepath.Join("clusters", name))
 	t.Logf("Finished installing cluster %q", name)
diff --git a/acceptance/steps/cluster.go b/acceptance/steps/cluster.go
index cd3fda344..a220ddfae 100644
--- a/acceptance/steps/cluster.go
+++ b/acceptance/steps/cluster.go
@@ -13,7 +13,6 @@ import (
 	"context"
 	"fmt"
 	"math/rand/v2"
-	"strings"
 	"time"
 
 	"github.com/redpanda-data/common-go/rpadmin"
@@ -29,9 +28,7 @@ import (
 )
 
 func checkClusterAvailability(ctx context.Context, t framework.TestingT, version, clusterName string) {
-	version = strings.TrimSpace(version)
-
-	if version == "vectorized" {
+	if getVersion(t, version) == "vectorized" {
 		checkV1ClusterAvailability(ctx, t, clusterName)
 		return
 	}
diff --git a/acceptance/steps/helpers.go b/acceptance/steps/helpers.go
index 1cf7732b5..02c586113 100644
--- a/acceptance/steps/helpers.go
+++ b/acceptance/steps/helpers.go
@@ -199,10 +199,15 @@ func (c *clusterClients) checkSchema(ctx context.Context, schema string, exists
 		t.Logf("Pulling list of schema subjects from cluster")
 		schemaRegistry := c.SchemaRegistry(ctx)
 		subjects, err = schemaRegistry.Subjects(ctx)
-		require.NoError(t, err)
+		if err != nil {
+			// just retry on error, sometimes v1 stuff is slow to come up even after
+			// the broker is marked as healthy
+			return false
+		}
 
 		return exists == slices.Contains(subjects, schema)
 	}, 10*time.Second, 1*time.Second, message) {
+		require.NoError(t, err)
 		t.Errorf("Final list of schema subjects: %v", subjects)
 	}
 }
@@ -293,7 +298,7 @@ func (c *clusterClients) checkRole(ctx context.Context, role string, exists bool) {
 }
 
 func versionedClientsForCluster(ctx context.Context, version, cluster string) *clusterClients {
-	version = strings.TrimSpace(version)
+	version = getVersion(framework.T(ctx), version)
 
 	framework.T(ctx).Logf("Got versioned cluster %q", version)
 
@@ -392,7 +397,7 @@ func v1ClientsForCluster(ctx context.Context, cluster string) *clusterClients {
 	return clients
 }
 
-func usersFromACLTable(t framework.TestingT, cluster string, table *godog.Table) []*redpandav1alpha2.User {
+func usersFromACLTable(t framework.TestingT, version, cluster string, table *godog.Table) []*redpandav1alpha2.User {
 	var users []*redpandav1alpha2.User
 
 	for i, row := range table.Rows {
@@ -404,13 +409,13 @@
 		name, acls := row.Cells[0].Value, row.Cells[1].Value
 		name, acls = strings.TrimSpace(name), strings.TrimSpace(acls)
 
-		users = append(users, userFromRow(t, cluster, name, "", "", acls))
+		users = append(users, userFromRow(t, version, cluster, name, "", "", acls))
 	}
 
 	return users
 }
 
-func usersFromAuthTable(t framework.TestingT, cluster string, table *godog.Table) []*redpandav1alpha2.User {
+func usersFromAuthTable(t framework.TestingT, version, cluster string, table *godog.Table) []*redpandav1alpha2.User {
 	var users []*redpandav1alpha2.User
 
 	for i, row := range table.Rows {
@@ -422,13 +427,13 @@
 		name, password, mechanism := row.Cells[0].Value, row.Cells[1].Value, row.Cells[2].Value
 		name, password, mechanism = strings.TrimSpace(name), strings.TrimSpace(password), strings.TrimSpace(mechanism)
 
-		users = append(users, userFromRow(t, cluster, name, password, mechanism, ""))
+		users = append(users, userFromRow(t, version, cluster, name, password, mechanism, ""))
 	}
 
 	return users
 }
 
-func usersFromFullTable(t framework.TestingT, cluster string, table *godog.Table) []*redpandav1alpha2.User {
+func usersFromFullTable(t framework.TestingT, version, cluster string, table *godog.Table) []*redpandav1alpha2.User {
 	var users []*redpandav1alpha2.User
 
 	for i, row := range table.Rows {
@@ -440,13 +445,15 @@
 		name, password, mechanism, acls := row.Cells[0].Value, row.Cells[1].Value, row.Cells[2].Value, row.Cells[3].Value
 		name, password, mechanism, acls = strings.TrimSpace(name), strings.TrimSpace(password), strings.TrimSpace(mechanism), strings.TrimSpace(acls)
 
-		users = append(users, userFromRow(t, cluster, name, password, mechanism, acls))
+		users = append(users, userFromRow(t, version, cluster, name, password, mechanism, acls))
 	}
 
 	return users
 }
 
-func userFromRow(t framework.TestingT, cluster, name, password, mechanism, acls string) *redpandav1alpha2.User {
+func userFromRow(t framework.TestingT, version, cluster, name, password, mechanism, acls string) *redpandav1alpha2.User {
+	version = getVersion(t, version)
+
 	user := &redpandav1alpha2.User{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: t.Namespace(),
@@ -460,6 +467,12 @@
 			},
 		},
 	}
+
+	if version == "vectorized" {
+		user.Spec.ClusterSource.ClusterRef.Group = ptr.To("redpanda.vectorized.io")
+		user.Spec.ClusterSource.ClusterRef.Kind = ptr.To("Cluster")
+	}
+
 	if mechanism != "" || password != "" {
 		user.Spec.Authentication = &redpandav1alpha2.UserAuthenticationSpec{
 			Type: ptr.To(redpandav1alpha2.SASLMechanism(mechanism)),
@@ -619,3 +632,12 @@ func removeAllFinalizers(ctx context.Context, t framework.TestingT, gvk schema.G
 		}
 	}
 }
+
+func getVersion(t framework.TestingT, version string) string {
+	version = strings.TrimSpace(version)
+	if version != "" {
+		return version
+	}
+
+	return t.Variant()
+}
diff --git a/acceptance/steps/manifest.go b/acceptance/steps/manifest.go
index c0f6e0648..2155b0311 100644
--- a/acceptance/steps/manifest.go
+++ b/acceptance/steps/manifest.go
@@ -12,9 +12,11 @@
 import (
 	"context"
 	"os"
+	"strings"
 
 	"github.com/cucumber/godog"
 	"github.com/stretchr/testify/require"
+	"sigs.k8s.io/yaml"
 
 	framework "github.com/redpanda-data/redpanda-operator/harpoon"
 )
@@ -23,7 +25,7 @@ func iApplyKubernetesManifest(ctx context.Context, t framework.TestingT, manifes
 	file, err := os.CreateTemp("", "manifest-*.yaml")
 	require.NoError(t, err)
 
-	_, err = file.Write([]byte(manifest.Content))
+	_, err = file.Write(normalizeContent(t, manifest.Content))
 	require.NoError(t, err)
 	require.NoError(t, file.Close())
 
@@ -37,3 +39,43 @@
 func iInstallLocalCRDs(ctx context.Context, t framework.TestingT, directory string) {
 	t.ApplyManifest(ctx, directory)
 }
+
+func normalizeContent(t framework.TestingT, content string) []byte {
+	manifest := map[string]any{}
+	require.NoError(t, yaml.Unmarshal([]byte(content), &manifest))
+
+	if getVersion(t, "") == "vectorized" {
+		addStringValueAtPath(manifest, "redpanda.vectorized.io", "spec.cluster.clusterRef.group")
+		addStringValueAtPath(manifest, "Cluster", "spec.cluster.clusterRef.kind")
+	}
+
+	contentBytes, err := yaml.Marshal(manifest)
+	require.NoError(t, err)
+
+	return contentBytes
+}
+
+func addStringValueAtPath(manifest map[string]any, value string, path string) {
+	keys := strings.Split(path, ".")
+
+	current := manifest
+	for i, key := range keys {
+		if i == len(keys)-1 {
+			break
+		}
+		found, ok := current[key]
+		if !ok {
+			// all but the final key must exist in the path
+			return
+		}
+		cast, ok := found.(map[string]any)
+		if !ok {
+			return
+		}
+		current = cast
+	}
+	if len(keys) > 0 {
+		last := keys[len(keys)-1]
+		current[last] = value
+	}
+}
diff --git a/acceptance/steps/register.go b/acceptance/steps/register.go
index f2b9802d1..b2d5de86d 100644
--- a/acceptance/steps/register.go
+++ b/acceptance/steps/register.go
@@ -15,33 +15,32 @@ func init() {
 	// General scenario steps
 	framework.RegisterStep(`^(vectorized )?cluster "([^"]*)" is available$`, checkClusterAvailability)
 	framework.RegisterStep(`^I apply Kubernetes manifest:$`, iApplyKubernetesManifest)
-	framework.RegisterStep(`^I store "([^"]*)" of Kubernetes object with type "([^"]*)" and name "([^"]*)" as "([^"]*)"$`, recordVariable)
 	framework.RegisterStep(`^the recorded value "([^"]*)" has the same value as "([^"]*)" of the Kubernetes object with type "([^"]*)" and name "([^"]*)"$`, assertVariableValue)
 	framework.RegisterStep(`^the recorded value "([^"]*)" is one less than "([^"]*)" of the Kubernetes object with type "([^"]*)" and name "([^"]*)"$`, assertVariableValueIncremented)
 
 	// Schema scenario steps
-	framework.RegisterStep(`^there is no schema "([^"]*)" in cluster "([^"]*)"$`, thereIsNoSchema)
+	framework.RegisterStep(`^there is no schema "([^"]*)" in( vectorized)? cluster "([^"]*)"$`, thereIsNoSchema)
 	framework.RegisterStep(`^schema "([^"]*)" is successfully synced$`, schemaIsSuccessfullySynced)
-	framework.RegisterStep(`^I should be able to check compatibility against "([^"]*)" in cluster "([^"]*)"$`, iShouldBeAbleToCheckCompatibilityAgainst)
+	framework.RegisterStep(`^I should be able to check compatibility against "([^"]*)" in( vectorized)? cluster "([^"]*)"$`, iShouldBeAbleToCheckCompatibilityAgainst)
 
 	// Topic scenario steps
-	framework.RegisterStep(`^there is no topic "([^"]*)" in cluster "([^"]*)"$`, thereIsNoTopic)
+	framework.RegisterStep(`^there is no topic "([^"]*)" in( vectorized)? cluster "([^"]*)"$`, thereIsNoTopic)
 	framework.RegisterStep(`^topic "([^"]*)" is successfully synced$`, topicIsSuccessfullySynced)
-	framework.RegisterStep(`^I should be able to produce and consume from "([^"]*)" in cluster "([^"]*)"$`, iShouldBeAbleToProduceAndConsumeFrom)
+	framework.RegisterStep(`^I should be able to produce and consume from "([^"]*)" in( vectorized)? cluster "([^"]*)"$`, iShouldBeAbleToProduceAndConsumeFrom)
 	framework.RegisterStep(`I create topic "([^"]*)" in( vectorized)? cluster "([^"]*)"`, iCreateTopicInCluster)
 
 	// User scenario steps
 	framework.RegisterStep(`^user "([^"]*)" is successfully synced$`, userIsSuccessfullySynced)
 	framework.RegisterStep(`^"([^"]*)" should be able to read from topic "([^"]*)" in( vectorized)? cluster "([^"]*)"$`, userShouldBeAbleToReadFromTopicInCluster)
 	framework.RegisterStep(`^there is no user "([^"]*)" in( vectorized)? cluster "([^"]*)"$`, thereIsNoUser)
-	framework.RegisterStep(`^there are already the following ACLs in cluster "([^"]*)":$`, thereAreAlreadyTheFollowingACLsInCluster)
+	framework.RegisterStep(`^there are already the following ACLs in( vectorized)? cluster "([^"]*)":$`, thereAreAlreadyTheFollowingACLsInCluster)
 	framework.RegisterStep(`^there are the following pre-existing users in( vectorized)? cluster "([^"]*)"$`, thereAreTheFollowingPreexistingUsersInCluster)
-	framework.RegisterStep(`^I create CRD-based users for cluster "([^"]*)":$`, iCreateCRDbasedUsers)
+	framework.RegisterStep(`^I create CRD-based users for( vectorized)? cluster "([^"]*)":$`, iCreateCRDbasedUsers)
 	framework.RegisterStep(`^I delete the CRD user "([^"]*)"$`, iDeleteTheCRDUser)
-	framework.RegisterStep(`^there should be ACLs in the cluster "([^"]*)" for user "([^"]*)"$`, thereShouldBeACLsInTheClusterForUser)
-	framework.RegisterStep(`^"([^"]*)" should exist and be able to authenticate to the "([^"]*)" cluster$`, shouldExistAndBeAbleToAuthenticateToTheCluster)
-	framework.RegisterStep(`^"([^"]*)" should be able to authenticate to the "([^"]*)" cluster with password "([^"]*)" and mechanism "([^"]*)"$`, shouldBeAbleToAuthenticateToTheClusterWithPasswordAndMechanism)
+	framework.RegisterStep(`^there should be ACLs in the( vectorized)? cluster "([^"]*)" for user "([^"]*)"$`, thereShouldBeACLsInTheClusterForUser)
+	framework.RegisterStep(`^"([^"]*)" should exist and be able to authenticate to the( vectorized)? "([^"]*)" cluster$`, shouldExistAndBeAbleToAuthenticateToTheCluster)
+	framework.RegisterStep(`^"([^"]*)" should be able to authenticate to the( vectorized)? "([^"]*)" cluster with password "([^"]*)" and mechanism "([^"]*)"$`, shouldBeAbleToAuthenticateToTheClusterWithPasswordAndMechanism)
 
 	// Role scenario steps
 	framework.RegisterStep(`^role "([^"]*)" is successfully synced$`, roleIsSuccessfullySynced)
diff --git a/acceptance/steps/schemas.go b/acceptance/steps/schemas.go
index 5d19430d9..62913d147 100644
--- a/acceptance/steps/schemas.go
+++ b/acceptance/steps/schemas.go
@@ -34,11 +34,11 @@ func schemaIsSuccessfullySynced(ctx context.Context, t framework.TestingT, schem
 	}, schemaObject.Status.Conditions)
 }
 
-func thereIsNoSchema(ctx context.Context, schema, cluster string) {
-	clientsForCluster(ctx, cluster).ExpectNoSchema(ctx, schema)
+func thereIsNoSchema(ctx context.Context, schema, version, cluster string) {
+	versionedClientsForCluster(ctx, version, cluster).ExpectNoSchema(ctx, schema)
 }
 
-func iShouldBeAbleToCheckCompatibilityAgainst(ctx context.Context, schema, cluster string) {
-	clients := clientsForCluster(ctx, cluster)
+func iShouldBeAbleToCheckCompatibilityAgainst(ctx context.Context, schema, version, cluster string) {
+	clients := versionedClientsForCluster(ctx, version, cluster)
 	clients.ExpectSchema(ctx, schema)
 }
diff --git a/acceptance/steps/topics.go b/acceptance/steps/topics.go
index 9168ecd8b..7145a1304 100644
--- a/acceptance/steps/topics.go
+++ b/acceptance/steps/topics.go
@@ -35,18 +35,18 @@ func topicIsSuccessfullySynced(ctx context.Context, t framework.TestingT, topic
 	}, topicObject.Status.Conditions)
 }
 
-func thereIsNoTopic(ctx context.Context, topic, cluster string) {
-	clientsForCluster(ctx, cluster).ExpectNoTopic(ctx, topic)
+func thereIsNoTopic(ctx context.Context, topic, version, cluster string) {
+	versionedClientsForCluster(ctx, version, cluster).ExpectNoTopic(ctx, topic)
 }
 
 func iCreateTopicInCluster(ctx context.Context, topic, version, cluster string) {
 	versionedClientsForCluster(ctx, version, cluster).CreateTopic(ctx, topic)
 }
 
-func iShouldBeAbleToProduceAndConsumeFrom(ctx context.Context, t framework.TestingT, topic, cluster string) {
+func iShouldBeAbleToProduceAndConsumeFrom(ctx context.Context, t framework.TestingT, topic, version, cluster string) {
 	payload := []byte("test")
 
-	clients := clientsForCluster(ctx, cluster)
+	clients := versionedClientsForCluster(ctx, version, cluster)
 	clients.ExpectTopic(ctx, topic)
 
 	kafkaClient := clients.Kafka(ctx)
diff --git a/acceptance/steps/users.go b/acceptance/steps/users.go
index b45a44704..f71cb7e2c 100644
--- a/acceptance/steps/users.go
+++ b/acceptance/steps/users.go
@@ -39,8 +39,8 @@
 	}, userObject.Status.Conditions)
 }
 
-func iCreateCRDbasedUsers(ctx context.Context, t framework.TestingT, cluster string, users *godog.Table) {
-	for _, user := range usersFromFullTable(t, cluster, users) {
+func iCreateCRDbasedUsers(ctx context.Context, t framework.TestingT, version, cluster string, users *godog.Table) {
+	for _, user := range usersFromFullTable(t, version, cluster, users) {
 		user := user
 
 		t.Logf("Creating user %q", user.Name)
@@ -84,8 +84,8 @@ func iDeleteTheCRDUser(ctx context.Context, t framework.TestingT, user string) {
 	require.NoError(t, t.Delete(ctx, &userObject))
 }
 
-func thereAreAlreadyTheFollowingACLsInCluster(ctx context.Context, t framework.TestingT, cluster string, acls *godog.Table) {
-	clients := clientsForCluster(ctx, cluster)
+func thereAreAlreadyTheFollowingACLsInCluster(ctx context.Context, t framework.TestingT, version, cluster string, acls *godog.Table) {
+	clients := versionedClientsForCluster(ctx, version, cluster)
 	aclClient := clients.ACLs(ctx)
 	// throw this in a cleanup instead of a defer since we use it in a cleanup
 	// below and it needs to stay alive until then
@@ -93,7 +93,7 @@
 		aclClient.Close()
 	})
 
-	for _, user := range usersFromACLTable(t, cluster, acls) {
+	for _, user := range usersFromACLTable(t, version, cluster, acls) {
 		user := user
 
 		t.Logf("Creating acls in cluster %q for %q", cluster, user.Name)
@@ -123,7 +123,7 @@ func thereAreTheFollowingPreexistingUsersInCluster(ctx context.Context, t framew
 	usersClient := clients.Users(ctx)
 	defer usersClient.Close()
 
-	for _, user := range usersFromAuthTable(t, cluster, users) {
+	for _, user := range usersFromAuthTable(t, version, cluster, users) {
 		user := user
 
 		t.Logf("Creating user in cluster %q for %q", cluster, user.Name)
@@ -141,8 +141,8 @@
 	}
 }
 
-func shouldBeAbleToAuthenticateToTheClusterWithPasswordAndMechanism(ctx context.Context, t framework.TestingT, user, cluster, password, mechanism string) {
-	clients := clientsForCluster(ctx, cluster).WithAuthentication(&client.UserAuth{
+func shouldBeAbleToAuthenticateToTheClusterWithPasswordAndMechanism(ctx context.Context, t framework.TestingT, user, version, cluster, password, mechanism string) {
+	clients := versionedClientsForCluster(ctx, version, cluster).WithAuthentication(&client.UserAuth{
 		Username:  user,
 		Password:  password,
 		Mechanism: mechanism,
@@ -152,8 +152,8 @@ func shouldBeAbleToAuthenticateToTheClusterWithPasswordAndMechanism(ctx context.
 	require.NotEmpty(t, users)
 }
 
-func shouldExistAndBeAbleToAuthenticateToTheCluster(ctx context.Context, t framework.TestingT, user, cluster string) {
-	clients := clientsForCluster(ctx, cluster)
+func shouldExistAndBeAbleToAuthenticateToTheCluster(ctx context.Context, t framework.TestingT, user, version, cluster string) {
+	clients := versionedClientsForCluster(ctx, version, cluster)
 
 	clients.ExpectUser(ctx, user)
 
@@ -165,8 +165,8 @@ func shouldExistAndBeAbleToAuthenticateToTheCluster(ctx context.Context, t frame
 	clients.AsUser(ctx, &userObject).ExpectUser(ctx, user)
 }
 
-func thereShouldBeACLsInTheClusterForUser(ctx context.Context, t framework.TestingT, cluster, user string) {
-	aclClient := clientsForCluster(ctx, cluster).ACLs(ctx)
+func thereShouldBeACLsInTheClusterForUser(ctx context.Context, t framework.TestingT, version, cluster, user string) {
+	aclClient := versionedClientsForCluster(ctx, version, cluster).ACLs(ctx)
 	defer aclClient.Close()
 
 	rules, err := aclClient.ListACLs(ctx, fmt.Sprintf("User:%s", user))
diff --git a/harpoon/features/stub.feature b/harpoon/features/stub.feature
index 7550e8ce3..1485e3404 100644
--- a/harpoon/features/stub.feature
+++ b/harpoon/features/stub.feature
@@ -1,4 +1,4 @@
-@vcluster @isolated
+@isolated @variant:something
 Feature: stub
   Scenario Outline: templated stub
     Given there is a stub
diff --git a/harpoon/internal/testing/testing.go b/harpoon/internal/testing/testing.go
index bfba4597c..455748c94 100644
--- a/harpoon/internal/testing/testing.go
+++ b/harpoon/internal/testing/testing.go
@@ -70,6 +70,8 @@ type TestingOptions struct {
 	ExitBehavior ExitBehavior
 	// Images says the base images to import any time a new node comes up
 	Images []string
+
+	variant string
 }
 
 func (o *TestingOptions) Clone() *TestingOptions {
@@ -82,6 +84,7 @@ func (o *TestingOptions) Clone() *TestingOptions {
 		SchemeRegisterers: o.SchemeRegisterers,
 		ExitBehavior:      o.ExitBehavior,
 		Images:            o.Images,
+		variant:           o.variant,
 	}
 }
 
@@ -235,8 +238,9 @@ func (t *TestingT) ApplyManifest(ctx context.Context, fileOrDirectory string) {
 	t.Cleanup(func(ctx context.Context) {
 		t.Logf("Deleting manifest %q", fileOrDirectory)
 
-		_, err := KubectlDelete(ctx, fileOrDirectory, opts)
+		output, err := KubectlDelete(ctx, fileOrDirectory, opts)
 		require.NoError(t, err)
+		t.Logf("Deletion finished: %s", output)
 	})
 }
 
@@ -326,6 +330,22 @@ func (t *TestingT) IsolateNamespace(ctx context.Context) string {
 	return namespace
 }
 
+// MarkVariant marks the test with a variant tag that can be fetched later on.
+func (t *TestingT) MarkVariant(variant string) {
+	oldVariant := t.options.variant
+	t.Logf("Marking test as variant %q", variant)
+	t.options.variant = variant
+
+	t.Cleanup(func(ctx context.Context) {
+		t.options.variant = oldVariant
+	})
+}
+
+// Variant retrieves the testing variant.
+func (t *TestingT) Variant() string {
+	return t.options.variant
+}
+
 // VCluster creates a vcluster instance and sets up the test routines to use it.
 func (t *TestingT) VCluster(ctx context.Context) string {
 	cluster, err := vcluster.New(ctx, t.restConfig)
diff --git a/harpoon/internal/tracking/features.go b/harpoon/internal/tracking/features.go
index f7ef9ed74..50f8adcd0 100644
--- a/harpoon/internal/tracking/features.go
+++ b/harpoon/internal/tracking/features.go
@@ -116,10 +116,12 @@ func (f *FeatureHookTracker) ScenarioFinished(ctx context.Context, scenario *god
 	f.features[scenario.Uri] = features
 
 	f.scenarios.finish(ctx, scenario)
+	features.t.Logf("finished feature scenario, %d scenarios left", features.scenariosToRun)
 
 	if features.scenariosToRun <= 0 {
 		delete(f.features, scenario.Uri)
 		features.t.SetMessagePrefix(fmt.Sprintf("Feature (%s) Cleanup Failure: ", features.name))
+		features.t.Log("running cleanup handlers")
 		internaltesting.WrapWithPanicHandler(false, f.opts.ExitBehavior, features.DoCleanup)(ctx, features.hasStepFailure)
 	}
 }
@@ -171,8 +173,7 @@ func (f *FeatureHookTracker) TestRunStarted() {}
 func (f *FeatureHookTracker) Defined(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {
 }
 
-func (f *FeatureHookTracker) Pickle(pickle *messages.Pickle) {
-}
+func (f *FeatureHookTracker) Pickle(*messages.Pickle) {}
 
 func (f *FeatureHookTracker) Failed(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition, error) {
 }
diff --git a/harpoon/suite.go b/harpoon/suite.go
index 35ef94ce0..f6674bf32 100644
--- a/harpoon/suite.go
+++ b/harpoon/suite.go
@@ -16,6 +16,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"path"
 	"strings"
 	"testing"
 	"time"
@@ -101,6 +102,8 @@
 	builder.RegisterTag("isolated", -1000, isolatedTag)
 	builder.RegisterTag("vcluster", -2000, vclusterTag)
+	builder.RegisterTag("variant", -3000, variantTag)
+	builder.RegisterTag("injectVariant", -3000, injectVariantTag)
 
 	return builder
 }
@@ -226,6 +229,28 @@
 	opts.DefaultContext = ctx
 	opts.Tags = fmt.Sprintf("~@skip:%s", providerName)
 
+	parsingSuite := &godog.TestSuite{
+		Name:    "acceptance",
+		Options: &opts,
+	}
+	features, err := parsingSuite.RetrieveFeatures()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, feature := range features {
+		if variantTag := featureVariant(feature.Feature.Tags); variantTag != "" {
+			content := bytes.ReplaceAll(feature.Content, []byte("Feature: "), []byte(fmt.Sprintf("Feature: %s - ", variantTag)))
+			content = bytes.ReplaceAll(content, []byte("Scenario: "), []byte(fmt.Sprintf("Scenario: %s - ", variantTag)))
+			content = bytes.ReplaceAll(content, []byte("Scenario Outline: "), []byte(fmt.Sprintf("Scenario Outline: %s - ", variantTag)))
+			content = bytes.ReplaceAll(content, []byte(fmt.Sprintf("@variant:%s", variantTag)), []byte(fmt.Sprintf("@injectVariant:%s", variantTag)))
+			opts.FeatureContents = append(opts.FeatureContents, godog.Feature{
+				Name:     path.Join(variantTag, feature.Uri),
+				Contents: content,
+			})
+		}
+	}
+
 	var kubeOptions *internaltesting.KubectlOptions
 	var helmClient *helm.Client
 	return &Suite{
diff --git a/harpoon/tags.go b/harpoon/tags.go
index 5fe33ac78..8d2875bdd 100644
--- a/harpoon/tags.go
+++ b/harpoon/tags.go
@@ -11,6 +11,8 @@ package framework
 
 import (
 	"context"
+
+	"github.com/stretchr/testify/require"
 )
 
 func isolatedTag(ctx context.Context, t TestingT, args ...string) context.Context {
@@ -22,3 +24,14 @@ func vclusterTag(ctx context.Context, t TestingT, args ...string) context.Contex
 	t.VCluster(ctx)
 	return ctx
 }
+
+func variantTag(ctx context.Context, t TestingT, args ...string) context.Context {
+	require.Equal(t, len(args), 1, "variant tags take a single argument")
+	return ctx
+}
+
+func injectVariantTag(ctx context.Context, t TestingT, args ...string) context.Context {
+	require.Equal(t, len(args), 1, "variant tags take a single argument")
+	t.MarkVariant(args[0])
+	return ctx
+}
diff --git a/harpoon/types.go b/harpoon/types.go
index 4053da5f4..85c689f31 100644
--- a/harpoon/types.go
+++ b/harpoon/types.go
@@ -54,6 +54,8 @@
 	IsolateNamespace(ctx context.Context) string
 	VCluster(ctx context.Context) string
+	MarkVariant(variant string)
+	Variant() string
 
 	InstallHelmChart(ctx context.Context, url, repo, chart string, options helm.InstallOptions)
 	UpgradeHelmChart(ctx context.Context, repo, chart, release string, options helm.UpgradeOptions)
diff --git a/harpoon/variant.go b/harpoon/variant.go
new file mode 100644
index 000000000..39c6ccef2
--- /dev/null
+++ b/harpoon/variant.go
@@ -0,0 +1,17 @@
+package framework
+
+import (
+	"strings"
+
+	messages "github.com/cucumber/messages/go/v21"
+)
+
+func featureVariant(tags []*messages.Tag) string {
+	for _, tag := range tags {
+		name := strings.TrimPrefix(tag.Name, "@")
+		if strings.HasPrefix(name, "variant:") {
+			return strings.TrimPrefix(name, "variant:")
+		}
+	}
+	return ""
+}
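
Note on how the variant plumbing above composes: a feature tagged @variant:vectorized is duplicated at parse time by the loop added to Build() in harpoon/suite.go, which prefixes the Feature and Scenario names with the variant and rewrites @variant: to @injectVariant:. Running the copy fires injectVariantTag, which calls MarkVariant; getVersion and ClusterTag then fall back to t.Variant() whenever a step or cluster tag does not name a version explicitly. Below is a minimal, self-contained sketch of that parse-time rewrite; the sample feature text is illustrative, not taken from the repository.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	variant := "vectorized"
	feature := []byte("@cluster:basic @variant:vectorized\nFeature: Topic CRDs\n  Scenario: Manage topics\n")

	// The same substitutions Build() applies before registering the copy with godog.
	feature = bytes.ReplaceAll(feature, []byte("Feature: "), []byte(fmt.Sprintf("Feature: %s - ", variant)))
	feature = bytes.ReplaceAll(feature, []byte("Scenario: "), []byte(fmt.Sprintf("Scenario: %s - ", variant)))
	feature = bytes.ReplaceAll(feature, []byte(fmt.Sprintf("@variant:%s", variant)), []byte(fmt.Sprintf("@injectVariant:%s", variant)))

	fmt.Print(string(feature))
	// Prints:
	// @cluster:basic @injectVariant:vectorized
	// Feature: vectorized - Topic CRDs
	//   Scenario: vectorized - Manage topics
}

Since the pattern "Scenario: " includes the colon, it never matches "Scenario Outline: ", which is why Build() rewrites outlines with a separate ReplaceAll.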