From 76d27d181b9c0097f5d941e2461f3a8ca580478c Mon Sep 17 00:00:00 2001 From: Chris Seto Date: Mon, 3 Nov 2025 16:16:10 -0500 Subject: [PATCH 01/12] [WIP] operator: add console stanza -> CRD migration Reconciling a Redpanda will now produce a Console CR with a clusterRef pointing back at the cluster itself. TODO LIST: - [ ] Conversions are fallible, figure out how to log errors without flood the log (rate.Limit?) - As everything is wrapped in a runtime.Extension, it's pretty easy to make an invalid config. - [ ] Acceptance tests that upgrade the operator and show a functioning console CRD and one with migration warnings. - [ ] Figure out how to remove the console Stanza w/o removing the Console CRD (ideally an opt in method?) - [ ] Clean up naming of CRD conversion? --- .../api/redpanda/v1alpha2/console_types.go | 97 +- .../redpanda/v1alpha2/console_types_test.go | 46 + operator/api/redpanda/v1alpha2/conversion.go | 114 +- .../v1alpha2/redpanda_clusterspec_types.go | 2 +- .../console-migration-cases.golden.txtar | 64 + .../testdata/console-migration-cases.txtar | 49 + .../redpanda/v1alpha2/testdata/crd-docs.adoc | 26 +- .../v1alpha2/zz_generated.conversion.go | 231 ++- .../v1alpha2/zz_generated.deepcopy.go | 19 +- .../bases/cluster.redpanda.com_consoles.yaml | 214 +-- .../bases/cluster.redpanda.com_redpandas.yaml | 2 + .../testdata/cases.pools.golden.txtar | 361 +++++ .../testdata/cases.resources.golden.txtar | 1376 ++++++++++++++--- .../internal/lifecycle/testdata/cases.txtar | 11 +- .../testdata/cases.values.golden.txtar | 366 +++++ .../internal/lifecycle/v2_simple_resources.go | 45 +- operator/pkg/functional/map.go | 2 +- 17 files changed, 2600 insertions(+), 425 deletions(-) create mode 100644 operator/api/redpanda/v1alpha2/console_types_test.go create mode 100644 operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar create mode 100644 operator/api/redpanda/v1alpha2/testdata/console-migration-cases.txtar diff --git a/operator/api/redpanda/v1alpha2/console_types.go b/operator/api/redpanda/v1alpha2/console_types.go index 70e3ec4a1..79fd30920 100644 --- a/operator/api/redpanda/v1alpha2/console_types.go +++ b/operator/api/redpanda/v1alpha2/console_types.go @@ -10,13 +10,18 @@ package v1alpha2 import ( + "encoding/json" + + "github.com/cockroachdb/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + applycorev1 "k8s.io/client-go/applyconfigurations/core/v1" "k8s.io/utils/ptr" + "github.com/redpanda-data/redpanda-operator/charts/console/v3" "github.com/redpanda-data/redpanda-operator/operator/pkg/functional" ) @@ -120,10 +125,16 @@ type ConsoleValues struct { SecretMounts []SecretMount `json:"secretMounts,omitempty"` Secret SecretConfig `json:"secret,omitempty"` LicenseSecretRef *corev1.SecretKeySelector `json:"licenseSecretRef,omitempty"` - LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty"` - ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` Deployment *DeploymentConfig `json:"deployment,omitempty"` Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"` + // Warnings is a slice of human readable warnings generated by the automatic + // migration of a Console V2 config to a Console V3 config. 
If warnings are + // present, they will describe which fields from the original config have + // been dropped and why. + // Setting this field has no effect. + Warnings []string `json:"warnings,omitempty"` } type AutoScaling struct { @@ -236,3 +247,85 @@ type RedpandaAdminAPISecrets struct { TLSCert *string `json:"tlsCert,omitempty"` TLSKey *string `json:"tlsKey,omitempty"` } + +// ProbeApplyConfiguration is a wrapper type that allows including a partial +// [corev1.Probe] in a CRD. +type ProbeApplyConfiguration struct { + *applycorev1.ProbeApplyConfiguration `json:",inline"` +} + +func (ac *ProbeApplyConfiguration) DeepCopy() *ProbeApplyConfiguration { + // For some inexplicable reason, apply configs don't have deepcopy + // generated for them. + // + // DeepCopyInto can be generated with just DeepCopy implemented. Sadly, the + // easiest way to implement DeepCopy is to run this type through JSON. It's + // highly unlikely that we'll hit a panic but it is possible to do so with + // invalid values for resource.Quantity and the like. + out := new(ProbeApplyConfiguration) + data, err := json.Marshal(ac) + if err != nil { + panic(err) + } + if err := json.Unmarshal(data, out); err != nil { + panic(err) + } + return out +} + +// ConvertConsoleSubchartToConsoleValues "migrates" the Console field +// ([RedpandaConsole]) of a [Redpanda] into a Console v3 compliant +// [ConsoleValues]. +func ConvertConsoleSubchartToConsoleValues(src *RedpandaConsole) (*ConsoleValues, error) { + // By the redpanda chart's default values, console is enabled by default + // and must be explicitly opted out of. + if src == nil { + // Empty values is valid. + return &ConsoleValues{}, nil + } + + // If the console integration is opted out of, return nil. + if !ptr.Deref(src.Enabled, true) { + return nil, nil + } + + out, err := autoconv_RedpandaConsole_To_ConsoleValues(src) + if err != nil { + return nil, err + } + + // Extract out .Console and .Config. .Console will be migrated and then + // merged into .Config as Config is meant to house V3 configurations. + var v2Config map[string]any + if src.Console != nil && len(src.Console.Raw) > 0 { + if err := json.Unmarshal(src.Console.Raw, &v2Config); err != nil { + return nil, errors.WithStack(err) + } + } + + var v3Config map[string]any + if src.Config != nil && len(src.Config.Raw) > 0 { + if err := json.Unmarshal(src.Config.Raw, &v3Config); err != nil { + return nil, errors.WithStack(err) + } + } + + migrated, warnings, err := console.ConfigFromV2(v2Config) + if err != nil { + return nil, errors.WithStack(err) + } + + merged := functional.MergeMaps(migrated, v3Config) + + marshalled, err := json.Marshal(merged) + if err != nil { + return nil, errors.WithStack(err) + } + + out.Config = &runtime.RawExtension{Raw: marshalled} + // Unlike the docs migrate, warnings get their own field. We can't set + // comments of a Kubernetes resource. + out.Warnings = warnings + + return out, nil +} diff --git a/operator/api/redpanda/v1alpha2/console_types_test.go b/operator/api/redpanda/v1alpha2/console_types_test.go new file mode 100644 index 000000000..ca73068ec --- /dev/null +++ b/operator/api/redpanda/v1alpha2/console_types_test.go @@ -0,0 +1,46 @@ +// Copyright 2025 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package v1alpha2 + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/tools/txtar" + "sigs.k8s.io/yaml" + + "github.com/redpanda-data/redpanda-operator/pkg/testutil" +) + +func TestConvertConsoleSubchartToConsoleValues(t *testing.T) { + cases, err := txtar.ParseFile("testdata/console-migration-cases.txtar") + require.NoError(t, err) + + goldens := testutil.NewTxTar(t, "testdata/console-migration-cases.golden.txtar") + + for i, tc := range cases.Files { + t.Run(tc.Name, func(t *testing.T) { + var in RedpandaConsole + require.NoError(t, yaml.Unmarshal(tc.Data, &in)) + + out, err := ConvertConsoleSubchartToConsoleValues(&in) + require.NoError(t, err) + + actual, err := yaml.Marshal(out) + require.NoError(t, err) + + // Add a bit of extra padding to make it easier to navigate the golden file. + actual = append(actual, '\n') + + goldens.AssertGolden(t, testutil.YAML, fmt.Sprintf("%02d-%s", i, tc.Name), actual) + }) + } +} diff --git a/operator/api/redpanda/v1alpha2/conversion.go b/operator/api/redpanda/v1alpha2/conversion.go index 2d3efcfb1..b97011d59 100644 --- a/operator/api/redpanda/v1alpha2/conversion.go +++ b/operator/api/redpanda/v1alpha2/conversion.go @@ -13,6 +13,7 @@ import ( "encoding/json" "github.com/cockroachdb/errors" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -49,7 +50,13 @@ var ( ConvertSchemaRegistrySpecToIR func(namespace string, src *SchemaRegistrySpec) *ir.SchemaRegistrySpec // Private conversions for tuning / customizing conversions. - // Naming conversion: `autoconv__To__` + // Naming convention: `autoconv__To__` + + // goverter:map . LicenseSecretRef | convertConsoleLicenseSecretRef + // goverter:ignore Config + // goverter:ignore Warnings + // goverter:ignore ExtraContainerPorts + autoconv_RedpandaConsole_To_ConsoleValues func(*RedpandaConsole) (*ConsoleValues, error) // goverter:ignore Create // Ability to disable creation of Deployment is not exposed through the Console CRD. @@ -71,6 +78,33 @@ func getNamespace(namespace string) string { return namespace } +// convertConsoleLicenseSecretRef extracts either the LicenseSecretRef or +// Enterprise.LicenseSecret from a [RedpandaConsole] into a +// [corev1.SecretKeySelector]. +func convertConsoleLicenseSecretRef(src *RedpandaConsole) (*corev1.SecretKeySelector, error) { + // If LicenseSecreRef is set, accept that. + if src.LicenseSecretRef != nil { + return src.LicenseSecretRef, nil + } + + // Short circuit if Enterprise isn't specified. + if src.Enterprise == nil || len(src.Enterprise.Raw) != 0 { + return nil, nil + } + + // Otherwise attempt to extract a secret reference from the Enterprise block. 
+ type ConsoleEnterprise struct { + LicenseSecret *corev1.SecretKeySelector + } + + enterprise, err := convertRuntimeRawExtension[ConsoleEnterprise](src.Enterprise) + if err != nil { + return nil, err + } + + return enterprise.LicenseSecret, nil +} + // Manually implemented conversion routines // Naming conversion: `conv__To__` @@ -231,22 +265,46 @@ func conv_KafkaSASLGSSAPI_To_ir_KafkaSASLGSSAPI(gssAPI *KafkaSASLGSSAPI, namespa return irGSSAPI } -func conv_runtime_RawExtension_To_mapany(ext *runtime.RawExtension) (map[string]any, error) { - if ext == nil { - return nil, nil - } - - var out map[string]any - if err := json.Unmarshal(ext.Raw, &out); err != nil { - return nil, errors.WithStack(err) - } - return out, nil -} - var ( conv_corev1_Volume_To_corev1_Volume = convertDeepCopier[corev1.Volume] conv_corev1_EnvVar_To_corev1EnvVar = convertDeepCopier[corev1.EnvVar] conv_corev1_ResourceRequirements_To_corev1_ResourceRequirements = convertDeepCopier[corev1.ResourceRequirements] + + // RawExtension -> Custom type (RedpandaConsole -> Console) + + conv_runtime_RawExtension_To_mapstringany = convertRuntimeRawExtension[map[string]any] + conv_runtime_RawExtension_To_mapstringstring = convertRuntimeRawExtension[map[string]string] + conv_runtime_RawExtension_To_Image = convertRuntimeRawExtension[*Image] + conv_runtime_RawExtension_To_ServiceAccountConfig = convertRuntimeRawExtension[*ServiceAccountConfig] + conv_runtime_RawExtension_To_Service = convertRuntimeRawExtension[*ServiceConfig] + conv_runtime_RawExtension_To_Ingress = convertRuntimeRawExtension[*IngressConfig] + conv_runtime_RawExtension_To_Autoscaling = convertRuntimeRawExtension[*AutoScaling] + conv_runtime_RawExtension_To_SecretMounts = convertRuntimeRawExtension[SecretMount] + conv_runtime_RawExtension_To_Secret = convertRuntimeRawExtension[SecretConfig] + conv_runtime_RawExtension_To_Deployment = convertRuntimeRawExtension[*DeploymentConfig] + + // RawExtension -> built in types (RedpandaConsole -> Console) + + conv_runtime_RawExtension_To_corev1_Affinity = convertRuntimeRawExtension[*corev1.Affinity] + conv_runtime_RawExtension_To_corev1_Container = convertRuntimeRawExtension[corev1.Container] + conv_runtime_RawExtension_To_corev1_EnvFromSource = convertRuntimeRawExtension[corev1.EnvFromSource] + conv_runtime_RawExtension_To_corev1_EnvVar = convertRuntimeRawExtension[corev1.EnvVar] + conv_runtime_RawExtension_To_corev1_LocalObjectReference = convertRuntimeRawExtension[corev1.LocalObjectReference] + conv_runtime_RawExtension_To_corev1_PodSecurityContext = convertRuntimeRawExtension[*corev1.PodSecurityContext] + conv_runtime_RawExtension_To_corev1_Resources = convertRuntimeRawExtension[*corev1.ResourceRequirements] + conv_runtime_RawExtension_To_corev1_SecurityContext = convertRuntimeRawExtension[*corev1.SecurityContext] + conv_runtime_RawExtension_To_corev1_Strategy = convertRuntimeRawExtension[*appsv1.DeploymentStrategy] + conv_runtime_RawExtension_To_corev1_Tolerations = convertRuntimeRawExtension[corev1.Toleration] + conv_runtime_RawExtension_To_corev1_TopologySpreadConstraints = convertRuntimeRawExtension[[]corev1.TopologySpreadConstraint] + conv_runtime_RawExtension_To_corev1_Volume = convertRuntimeRawExtension[corev1.Volume] + conv_runtime_RawExtension_To_corev1_VolumeMount = convertRuntimeRawExtension[corev1.VolumeMount] + + // TODO THIS IS BAD AND BROKEN (Will write 0s for unspecified fields and generate invalid options). + // ConsolePartialValues really needs to have ApplyConfigs for most k8s types. 
+ // Upgrade gen partial to pull an overridden type from a comment or field tag? + conv_LivenessProbe_To_ProbeApplyConfiguration = convertViaMarshaling[*LivenessProbe, *ProbeApplyConfiguration] + conv_ProbeApplyConfiguration_To_corev1_Probe = convertViaMarshaling[ProbeApplyConfiguration, corev1.Probe] + conv_ReadinessProbe_To_ProbeApplyConfiguration = convertViaMarshaling[*ReadinessProbe, *ProbeApplyConfiguration] ) type deepCopier[T any] interface { @@ -274,3 +332,33 @@ func conv_SecretKeyRefPtr_To_ir_ValueSourcePtr(skr *SecretKeyRef, namespace stri }, } } + +func convertRuntimeRawExtension[T any](ext *runtime.RawExtension) (T, error) { + if ext == nil { + var zero T + return zero, nil + } + + var out T + if err := json.Unmarshal(ext.Raw, &out); err != nil { + var zero T + return zero, errors.Wrapf(err, "unmarshalling %T into %T", ext, zero) + } + return out, nil +} + +func convertViaMarshaling[From any, To any](src From) (To, error) { + marshalled, err := json.Marshal(src) + if err != nil { + var zero To + return zero, errors.Wrapf(err, "marshalling: %T", src) + } + + var out To + if err := json.Unmarshal(marshalled, &out); err != nil { + var zero To + return zero, errors.Wrapf(err, "unmarshalling %T into %T", src, zero) + } + + return out, nil +} diff --git a/operator/api/redpanda/v1alpha2/redpanda_clusterspec_types.go b/operator/api/redpanda/v1alpha2/redpanda_clusterspec_types.go index 442e16c6b..989d99133 100644 --- a/operator/api/redpanda/v1alpha2/redpanda_clusterspec_types.go +++ b/operator/api/redpanda/v1alpha2/redpanda_clusterspec_types.go @@ -196,7 +196,7 @@ type RedpandaConsole struct { // Specifies whether the Redpanda Console subchart should be deployed. Enabled *bool `json:"enabled,omitempty"` // Sets the number of replicas for the Redpanda Console Deployment resource. - ReplicaCount *int `json:"replicaCount,omitempty"` + ReplicaCount *int32 `json:"replicaCount,omitempty"` // Specifies a custom name for the Redpanda Console resources, overriding the default naming convention. NameOverride *string `json:"nameOverride,omitempty"` // Specifies a full custom name, which overrides the entire naming convention including release name and chart name. diff --git a/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar b/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar new file mode 100644 index 000000000..03b8b3a5c --- /dev/null +++ b/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar @@ -0,0 +1,64 @@ +-- 00-empty -- +config: + kafka: + sasl: + enabled: true + impersonateUser: true +secret: {} + +-- 01-disabled -- +null + +-- 02-configured -- +config: + authentication: + jwtSigningKey: secret123 + useSecureCookies: true + authorization: + roleBindings: + - roleName: admin + users: + - loginType: oidc + name: devs + kafka: + sasl: + enabled: true + impersonateUser: true +secret: {} + +-- 03-config-and-console -- +config: + authentication: + someOtherSetting: + - absolutely + kafka: + sasl: + enabled: true + impersonateUser: true +secret: {} + +-- 04-enterprise-and-license-ref -- +config: + kafka: + sasl: + enabled: true + impersonateUser: true +licenseSecretRef: + key: license + name: license +secret: {} + +-- 05-migration-warnings -- +config: + authorization: + roleBindings: + - roleName: admin + users: [] + kafka: + sasl: + enabled: true + impersonateUser: true +secret: {} +warnings: +- Removed group subject from role binding 'admin'. Groups are not supported in v3. 
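
The `warnings` entries in the golden output above are what callers see on the migrated values at runtime. Below is a minimal sketch of driving the migration and surfacing those warnings, written as if it lived alongside the v1alpha2 types. Only ConvertConsoleSubchartToConsoleValues and the Warnings field come from this patch; migrateConsoleStanza and the logr.Logger parameter are illustrative assumptions.

// Sketch: run the V2 -> V3 console migration and report any dropped fields.
func migrateConsoleStanza(logger logr.Logger, src *RedpandaConsole) (*ConsoleValues, error) {
	values, err := ConvertConsoleSubchartToConsoleValues(src)
	if err != nil {
		// Anything wrapped in a runtime.RawExtension can hold invalid JSON,
		// so the conversion is fallible.
		return nil, err
	}
	// A nil result means the console integration was explicitly disabled.
	if values != nil {
		for _, warning := range values.Warnings {
			logger.Info("console stanza migration warning", "warning", warning)
		}
	}
	return values, nil
}
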
+ diff --git a/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.txtar b/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.txtar new file mode 100644 index 000000000..14a7959b7 --- /dev/null +++ b/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.txtar @@ -0,0 +1,49 @@ +-- empty -- + +-- disabled -- +enabled: false + +-- configured -- +console: + login: + jwtSecret: secret123 + useSecureCookies: true + roleBindings: + - roleName: admin + subjects: + - kind: user + provider: OIDC + name: devs + +-- config-and-console -- +# console should be migrated to V3's format and merged with config, if present. +console: + login: + jwtSecret: secret123 + useSecureCookies: true +config: + authentication: + someOtherSetting: ["absolutely"] + +-- enterprise-and-license-ref -- +# Enterprise and licenseSecretRef refer to the same thing. +# When both are provided, licenseSecretRef takes precedence. +enterprise: + license: + name: enterprise-license + key: license +licenseSecretRef: + name: license + key: license + +-- migration-warnings -- +# Fields that can't be migrated generate warnings on the new CR to indicate that +# migration was not possible but doesn't not block the migration. + +console: + roleBindings: + - roleName: admin + subjects: + - kind: group + provider: OIDC + name: devs diff --git a/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc b/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc index 00c4cfed7..fbd349dcb 100644 --- a/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc +++ b/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc @@ -956,10 +956,15 @@ ConsoleCreateObj represents configuration options for creating Kubernetes object | *`secretMounts`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-secretmount[$$SecretMount$$] array__ | | | | *`secret`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-secretconfig[$$SecretConfig$$]__ | | | | *`licenseSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | | | -| *`livenessProbe`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#probe-v1-core[$$Probe$$]__ | | | -| *`readinessProbe`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#probe-v1-core[$$Probe$$]__ | | | +| *`livenessProbe`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-probeapplyconfiguration[$$ProbeApplyConfiguration$$]__ | | | +| *`readinessProbe`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-probeapplyconfiguration[$$ProbeApplyConfiguration$$]__ | | | | *`deployment`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-deploymentconfig[$$DeploymentConfig$$]__ | | | | *`strategy`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#deploymentstrategy-v1-apps[$$DeploymentStrategy$$]__ | | | +| *`warnings`* __string array__ | Warnings is a slice of human readable warnings generated by the automatic + +migration of a Console V2 config to a Console V3 config. If warnings are + +present, they will describe which fields from the original config have + +been dropped and why. + +Setting this field has no effect. 
+ | | | *`cluster`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-clustersource[$$ClusterSource$$]__ | | | |=== @@ -2248,6 +2253,23 @@ into this Job's PodTemplate. + | | |=== +[id="{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-probeapplyconfiguration"] +==== ProbeApplyConfiguration + + + +ProbeApplyConfiguration is a wrapper type that allows including a partial +[corev1.Probe] in a CRD. + + + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-consolespec[$$ConsoleSpec$$] +**** + + + [id="{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-rbac"] ==== RBAC diff --git a/operator/api/redpanda/v1alpha2/zz_generated.conversion.go b/operator/api/redpanda/v1alpha2/zz_generated.conversion.go index ec128535a..bdcb6c54b 100644 --- a/operator/api/redpanda/v1alpha2/zz_generated.conversion.go +++ b/operator/api/redpanda/v1alpha2/zz_generated.conversion.go @@ -103,7 +103,7 @@ func init() { xstring := *(*source).PriorityClassName consolePartialRenderValues.PriorityClassName = &xstring } - mapStringUnknown, err := conv_runtime_RawExtension_To_mapany((*source).Config) + mapStringUnknown, err := conv_runtime_RawExtension_To_mapstringany((*source).Config) if err != nil { return nil, err } @@ -152,8 +152,16 @@ func init() { } consolePartialRenderValues.Secret = v1alpha2SecretConfigToPConsolePartialSecretConfig((*source).Secret) consolePartialRenderValues.LicenseSecretRef = pV1SecretKeySelectorToPV1SecretKeySelector((*source).LicenseSecretRef) - consolePartialRenderValues.LivenessProbe = pV1ProbeToPV1Probe((*source).LivenessProbe) - consolePartialRenderValues.ReadinessProbe = pV1ProbeToPV1Probe((*source).ReadinessProbe) + pV1Probe, err := pV1alpha2ProbeApplyConfigurationToPV1Probe((*source).LivenessProbe) + if err != nil { + return nil, err + } + consolePartialRenderValues.LivenessProbe = pV1Probe + pV1Probe2, err := pV1alpha2ProbeApplyConfigurationToPV1Probe((*source).ReadinessProbe) + if err != nil { + return nil, err + } + consolePartialRenderValues.ReadinessProbe = pV1Probe2 consolePartialRenderValues.Deployment = autoconv_DeploymentConfig_console_PartialDeploymentConfig((*source).Deployment) consolePartialRenderValues.Strategy = pV1DeploymentStrategyToPV1DeploymentStrategy((*source).Strategy) pConsolePartialRenderValues = &consolePartialRenderValues @@ -223,6 +231,212 @@ func init() { } return pConsolePartialDeploymentConfig } + autoconv_RedpandaConsole_To_ConsoleValues = func(source *RedpandaConsole) (*ConsoleValues, error) { + var pV1alpha2ConsoleValues *ConsoleValues + if source != nil { + var v1alpha2ConsoleValues ConsoleValues + if (*source).ReplicaCount != nil { + xint32 := *(*source).ReplicaCount + v1alpha2ConsoleValues.ReplicaCount = &xint32 + } + pV1alpha2Image, err := conv_runtime_RawExtension_To_Image((*source).Image) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Image = pV1alpha2Image + if (*source).ImagePullSecrets != nil { + v1alpha2ConsoleValues.ImagePullSecrets = make([]v1.LocalObjectReference, len((*source).ImagePullSecrets)) + for i := 0; i < len((*source).ImagePullSecrets); i++ { + v1LocalObjectReference, err := conv_runtime_RawExtension_To_corev1_LocalObjectReference((*source).ImagePullSecrets[i]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ImagePullSecrets[i] = v1LocalObjectReference + } + } + if (*source).AutomountServiceAccountToken != nil { + xbool 
:= *(*source).AutomountServiceAccountToken + v1alpha2ConsoleValues.AutomountServiceAccountToken = &xbool + } + pV1alpha2ServiceAccountConfig, err := conv_runtime_RawExtension_To_ServiceAccountConfig((*source).ServiceAccount) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ServiceAccount = pV1alpha2ServiceAccountConfig + if (*source).CommonLabels != nil { + v1alpha2ConsoleValues.CommonLabels = make(map[string]string, len((*source).CommonLabels)) + for key, value := range (*source).CommonLabels { + v1alpha2ConsoleValues.CommonLabels[key] = value + } + } + mapStringString, err := conv_runtime_RawExtension_To_mapstringstring((*source).Annotations) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Annotations = mapStringString + mapStringString2, err := conv_runtime_RawExtension_To_mapstringstring((*source).PodAnnotations) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.PodAnnotations = mapStringString2 + mapStringString3, err := conv_runtime_RawExtension_To_mapstringstring((*source).PodLabels) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.PodLabels = mapStringString3 + pV1PodSecurityContext, err := conv_runtime_RawExtension_To_corev1_PodSecurityContext((*source).PodSecurityContext) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.PodSecurityContext = pV1PodSecurityContext + pV1SecurityContext, err := conv_runtime_RawExtension_To_corev1_SecurityContext((*source).SecurityContext) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.SecurityContext = pV1SecurityContext + pV1alpha2ServiceConfig, err := conv_runtime_RawExtension_To_Service((*source).Service) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Service = pV1alpha2ServiceConfig + pV1alpha2IngressConfig, err := conv_runtime_RawExtension_To_Ingress((*source).Ingress) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Ingress = pV1alpha2IngressConfig + pV1ResourceRequirements, err := conv_runtime_RawExtension_To_corev1_Resources((*source).Resources) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Resources = pV1ResourceRequirements + pV1alpha2AutoScaling, err := conv_runtime_RawExtension_To_Autoscaling((*source).Autoscaling) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Autoscaling = pV1alpha2AutoScaling + mapStringString4, err := conv_runtime_RawExtension_To_mapstringstring((*source).NodeSelector) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.NodeSelector = mapStringString4 + if (*source).Tolerations != nil { + v1alpha2ConsoleValues.Tolerations = make([]v1.Toleration, len((*source).Tolerations)) + for j := 0; j < len((*source).Tolerations); j++ { + v1Toleration, err := conv_runtime_RawExtension_To_corev1_Tolerations((*source).Tolerations[j]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Tolerations[j] = v1Toleration + } + } + pV1Affinity, err := conv_runtime_RawExtension_To_corev1_Affinity((*source).Affinity) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Affinity = pV1Affinity + v1TopologySpreadConstraintList, err := conv_runtime_RawExtension_To_corev1_TopologySpreadConstraints((*source).TopologySpreadConstraints) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.TopologySpreadConstraints = v1TopologySpreadConstraintList + if (*source).PriorityClassName != nil { + xstring := *(*source).PriorityClassName + v1alpha2ConsoleValues.PriorityClassName = &xstring + } + if (*source).ExtraEnv != nil { + v1alpha2ConsoleValues.ExtraEnv 
= make([]v1.EnvVar, len((*source).ExtraEnv)) + for k := 0; k < len((*source).ExtraEnv); k++ { + v1EnvVar, err := conv_runtime_RawExtension_To_corev1_EnvVar((*source).ExtraEnv[k]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ExtraEnv[k] = v1EnvVar + } + } + if (*source).ExtraEnvFrom != nil { + v1alpha2ConsoleValues.ExtraEnvFrom = make([]v1.EnvFromSource, len((*source).ExtraEnvFrom)) + for l := 0; l < len((*source).ExtraEnvFrom); l++ { + v1EnvFromSource, err := conv_runtime_RawExtension_To_corev1_EnvFromSource((*source).ExtraEnvFrom[l]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ExtraEnvFrom[l] = v1EnvFromSource + } + } + if (*source).ExtraVolumes != nil { + v1alpha2ConsoleValues.ExtraVolumes = make([]v1.Volume, len((*source).ExtraVolumes)) + for m := 0; m < len((*source).ExtraVolumes); m++ { + v1Volume, err := conv_runtime_RawExtension_To_corev1_Volume((*source).ExtraVolumes[m]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ExtraVolumes[m] = v1Volume + } + } + if (*source).ExtraVolumeMounts != nil { + v1alpha2ConsoleValues.ExtraVolumeMounts = make([]v1.VolumeMount, len((*source).ExtraVolumeMounts)) + for n := 0; n < len((*source).ExtraVolumeMounts); n++ { + v1VolumeMount, err := conv_runtime_RawExtension_To_corev1_VolumeMount((*source).ExtraVolumeMounts[n]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ExtraVolumeMounts[n] = v1VolumeMount + } + } + if (*source).ExtraContainers != nil { + v1alpha2ConsoleValues.ExtraContainers = make([]v1.Container, len((*source).ExtraContainers)) + for o := 0; o < len((*source).ExtraContainers); o++ { + v1Container, err := conv_runtime_RawExtension_To_corev1_Container((*source).ExtraContainers[o]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ExtraContainers[o] = v1Container + } + } + if (*source).SecretMounts != nil { + v1alpha2ConsoleValues.SecretMounts = make([]SecretMount, len((*source).SecretMounts)) + for p := 0; p < len((*source).SecretMounts); p++ { + v1alpha2SecretMount, err := conv_runtime_RawExtension_To_SecretMounts((*source).SecretMounts[p]) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.SecretMounts[p] = v1alpha2SecretMount + } + } + v1alpha2SecretConfig, err := conv_runtime_RawExtension_To_Secret((*source).Secret) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Secret = v1alpha2SecretConfig + pV1SecretKeySelector, err := convertConsoleLicenseSecretRef(source) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.LicenseSecretRef = pV1SecretKeySelector + pV1alpha2ProbeApplyConfiguration, err := conv_LivenessProbe_To_ProbeApplyConfiguration((*source).LivenessProbe) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.LivenessProbe = pV1alpha2ProbeApplyConfiguration + pV1alpha2ProbeApplyConfiguration2, err := conv_ReadinessProbe_To_ProbeApplyConfiguration((*source).ReadinessProbe) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.ReadinessProbe = pV1alpha2ProbeApplyConfiguration2 + pV1alpha2DeploymentConfig, err := conv_runtime_RawExtension_To_Deployment((*source).Deployment) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Deployment = pV1alpha2DeploymentConfig + pV1DeploymentStrategy, err := conv_runtime_RawExtension_To_corev1_Strategy((*source).Strategy) + if err != nil { + return nil, err + } + v1alpha2ConsoleValues.Strategy = pV1DeploymentStrategy + pV1alpha2ConsoleValues = &v1alpha2ConsoleValues + } + return pV1alpha2ConsoleValues, nil + } 
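+	// autoconv_RedpandaConsole_To_ConsoleValues above is generated by goverter
+	// from the directives in conversion.go. Config, Warnings, and
+	// ExtraContainerPorts are intentionally left unset here:
+	// ConvertConsoleSubchartToConsoleValues migrates the legacy console stanza
+	// into Config and records dropped fields in Warnings, while
+	// LicenseSecretRef is resolved by convertConsoleLicenseSecretRef, which
+	// prefers licenseSecretRef over the enterprise block.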
autoconv_ServiceAccountConfig_To_console_PartialServiceAccountConfig = func(source *ServiceAccountConfig) *v3.PartialServiceAccountConfig { var pConsolePartialServiceAccountConfig *v3.PartialServiceAccountConfig if source != nil { @@ -894,6 +1108,17 @@ func pV1alpha2OIDCLoginSecretsToPConsolePartialOIDCLoginSecrets(source *OIDCLogi } return pConsolePartialOIDCLoginSecrets } +func pV1alpha2ProbeApplyConfigurationToPV1Probe(source *ProbeApplyConfiguration) (*v1.Probe, error) { + var pV1Probe *v1.Probe + if source != nil { + v1Probe, err := conv_ProbeApplyConfiguration_To_corev1_Probe((*source)) + if err != nil { + return nil, err + } + pV1Probe = &v1Probe + } + return pV1Probe, nil +} func pV1alpha2RedpandaAdminAPISecretsToPConsolePartialRedpandaAdminAPISecrets(source *RedpandaAdminAPISecrets) *v3.PartialRedpandaAdminAPISecrets { var pConsolePartialRedpandaAdminAPISecrets *v3.PartialRedpandaAdminAPISecrets if source != nil { diff --git a/operator/api/redpanda/v1alpha2/zz_generated.deepcopy.go b/operator/api/redpanda/v1alpha2/zz_generated.deepcopy.go index e96ccd6fa..d8611077b 100644 --- a/operator/api/redpanda/v1alpha2/zz_generated.deepcopy.go +++ b/operator/api/redpanda/v1alpha2/zz_generated.deepcopy.go @@ -1199,13 +1199,11 @@ func (in *ConsoleValues) DeepCopyInto(out *ConsoleValues) { } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(v1.Probe) - (*in).DeepCopyInto(*out) + *out = (*in).DeepCopy() } if in.ReadinessProbe != nil { in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(v1.Probe) - (*in).DeepCopyInto(*out) + *out = (*in).DeepCopy() } if in.Deployment != nil { in, out := &in.Deployment, &out.Deployment @@ -1217,6 +1215,11 @@ func (in *ConsoleValues) DeepCopyInto(out *ConsoleValues) { *out = new(appsv1.DeploymentStrategy) (*in).DeepCopyInto(*out) } + if in.Warnings != nil { + in, out := &in.Warnings, &out.Warnings + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleValues. @@ -2968,6 +2971,12 @@ func (in *PostUpgradeJob) DeepCopy() *PostUpgradeJob { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeApplyConfiguration) DeepCopyInto(out *ProbeApplyConfiguration) { + clone := in.DeepCopy() + *out = *clone +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RBAC) DeepCopyInto(out *RBAC) { *out = *in @@ -3517,7 +3526,7 @@ func (in *RedpandaConsole) DeepCopyInto(out *RedpandaConsole) { } if in.ReplicaCount != nil { in, out := &in.ReplicaCount, &out.ReplicaCount - *out = new(int) + *out = new(int32) **out = **in } if in.NameOverride != nil { diff --git a/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml b/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml index 52dd0ac3d..45aa23e5e 100644 --- a/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml +++ b/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml @@ -6857,154 +6857,90 @@ spec: x-kubernetes-map-type: atomic livenessProbe: description: |- - Probe describes a health check to be performed against a container to determine whether it is - alive or ready to receive traffic. + ProbeApplyConfiguration is a wrapper type that allows including a partial + [corev1.Probe] in a CRD. properties: exec: - description: Exec specifies a command to execute in the container. 
+ description: |- + ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use + with apply. properties: command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array - x-kubernetes-list-type: atomic type: object failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies a GRPC HealthCheckRequest. + description: |- + GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use + with apply. properties: port: - description: Port number of the gRPC service. Number must - be in the range 1 to 65535. format: int32 type: integer service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. type: string - required: - - port type: object httpGet: - description: HTTPGet specifies an HTTP GET request to perform. + description: |- + HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use + with apply. properties: host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP allows - repeated headers. items: - description: HTTPHeader describes a custom header to be - used in HTTP probes + description: |- + HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use + with apply. properties: name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: - description: The header field value type: string - required: - - name - - value type: object type: array - x-kubernetes-list-type: atomic path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. + description: URIScheme identifies the scheme used for connection + to a host for Get actions type: string - required: - - port type: object initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
format: int32 type: integer successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies a connection to a TCP port. + description: |- + TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use + with apply. properties: host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - required: - - port type: object terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object @@ -7259,154 +7195,90 @@ spec: type: string readinessProbe: description: |- - Probe describes a health check to be performed against a container to determine whether it is - alive or ready to receive traffic. + ProbeApplyConfiguration is a wrapper type that allows including a partial + [corev1.Probe] in a CRD. properties: exec: - description: Exec specifies a command to execute in the container. + description: |- + ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use + with apply. properties: command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array - x-kubernetes-list-type: atomic type: object failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies a GRPC HealthCheckRequest. + description: |- + GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use + with apply. properties: port: - description: Port number of the gRPC service. Number must - be in the range 1 to 65535. 
format: int32 type: integer service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. type: string - required: - - port type: object httpGet: - description: HTTPGet specifies an HTTP GET request to perform. + description: |- + HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use + with apply. properties: host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP allows - repeated headers. items: - description: HTTPHeader describes a custom header to be - used in HTTP probes + description: |- + HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use + with apply. properties: name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: - description: The header field value type: string - required: - - name - - value type: object type: array - x-kubernetes-list-type: atomic path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. + description: URIScheme identifies the scheme used for connection + to a host for Get actions type: string - required: - - port type: object initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies a connection to a TCP port. + description: |- + TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use + with apply. properties: host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - required: - - port type: object terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. 
- If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object @@ -8035,6 +7907,16 @@ spec: - whenUnsatisfiable type: object type: array + warnings: + description: |- + Warnings is a slice of human readable warnings generated by the automatic + migration of a Console V2 config to a Console V3 config. If warnings are + present, they will describe which fields from the original config have + been dropped and why. + Setting this field has no effect. + items: + type: string + type: array type: object status: properties: diff --git a/operator/config/crd/bases/cluster.redpanda.com_redpandas.yaml b/operator/config/crd/bases/cluster.redpanda.com_redpandas.yaml index 46a922880..4543e3b79 100644 --- a/operator/config/crd/bases/cluster.redpanda.com_redpandas.yaml +++ b/operator/config/crd/bases/cluster.redpanda.com_redpandas.yaml @@ -1736,6 +1736,7 @@ spec: replicaCount: description: Sets the number of replicas for the Redpanda Console Deployment resource. + format: int32 type: integer resources: description: Configures resource requests and limits for the @@ -22846,6 +22847,7 @@ spec: replicaCount: description: Sets the number of replicas for the Redpanda Console Deployment resource. 
+ format: int32 type: integer resources: description: Configures resource requests and limits for the diff --git a/operator/internal/lifecycle/testdata/cases.pools.golden.txtar b/operator/internal/lifecycle/testdata/cases.pools.golden.txtar index d9ffe4988..315c45e32 100644 --- a/operator/internal/lifecycle/testdata/cases.pools.golden.txtar +++ b/operator/internal/lifecycle/testdata/cases.pools.golden.txtar @@ -1149,6 +1149,367 @@ status: availableReplicas: 0 replicas: 0 +-- console-disabled -- +- apiVersion: apps/v1 + kind: StatefulSet + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled + namespace: console-disabled + spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/component: redpanda-statefulset + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/name: redpanda + serviceName: console-disabled + template: + metadata: + annotations: + config.redpanda.com/checksum: a90b21628d89546d234075143f437a7118e87dca2eb009f7ffb653e7b8f09eca + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda-statefulset + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/broker: "true" + helm.sh/chart: redpanda-25.1.1-beta3 + redpanda.com/poddisruptionbudget: console-disabled + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: redpanda-statefulset + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/name: redpanda + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: false + containers: + - command: + - rpk + - redpanda + - start + - --advertise-rpc-addr=$(SERVICE_NAME).console-disabled.console-disabled.svc.cluster.local.:33145 + env: + - name: SERVICE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + image: redpandadata/redpanda:v25.2.1 + lifecycle: + postStart: + exec: + command: + - bash + - -c + - 'timeout -v 45 bash -x /var/lifecycle/postStart.sh 2>&1 | sed "s/^/lifecycle-hook + post-start $(date): /" | tee /proc/1/fd/1; true' + preStop: + exec: + command: + - bash + - -c + - 'timeout -v 45 bash -x /var/lifecycle/preStop.sh 2>&1 | sed "s/^/lifecycle-hook + pre-stop $(date): /" | tee /proc/1/fd/1; true' + livenessProbe: + exec: + command: + - /bin/sh + - -c + - curl --silent --fail -k -m 5 --cacert /etc/tls/certs/default/ca.crt + "https://${SERVICE_NAME}.console-disabled.console-disabled.svc.cluster.local.:9644/v1/status/ready" + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + name: redpanda + ports: + - containerPort: 9644 + name: admin + - containerPort: 9645 + name: admin-default + - containerPort: 8082 + name: http + - containerPort: 8083 + name: http-default + - containerPort: 9093 + name: kafka + - containerPort: 9094 + name: kafka-default + - containerPort: 33145 + name: rpc + - containerPort: 
8081 + name: schemaregistry + - containerPort: 8084 + name: schema-default + resources: + limits: + cpu: "1" + memory: 2560Mi + startupProbe: + exec: + command: + - /bin/sh + - -c + - | + set -e + RESULT=$(curl --silent --fail -k -m 5 --cacert /etc/tls/certs/default/ca.crt "https://${SERVICE_NAME}.console-disabled.console-disabled.svc.cluster.local.:9644/v1/status/ready") + echo $RESULT + echo $RESULT | grep ready + failureThreshold: 120 + initialDelaySeconds: 1 + periodSeconds: 10 + volumeMounts: + - mountPath: /etc/tls/certs/default + name: redpanda-default-cert + - mountPath: /etc/tls/certs/external + name: redpanda-external-cert + - mountPath: /etc/redpanda + name: config + - mountPath: /tmp/base-config + name: base-config + - mountPath: /var/lifecycle + name: lifecycle-scripts + - mountPath: /var/lib/redpanda/data + name: datadir + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + - args: + - supervisor + - -- + - /redpanda-operator + - sidecar + - --redpanda-yaml + - /etc/redpanda/redpanda.yaml + - --redpanda-cluster-namespace + - console-disabled + - --redpanda-cluster-name + - console-disabled + - --run-broker-probe + - --broker-probe-broker-url + - $(SERVICE_NAME).console-disabled.console-disabled.svc.cluster.local.:9644 + - --no-set-superusers + command: + - /redpanda-operator + env: + - name: SERVICE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + image: localhost/redpanda-operator:dev + name: sidecar + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 8093 + initialDelaySeconds: 1 + periodSeconds: 10 + successThreshold: 1 + resources: {} + securityContext: {} + volumeMounts: + - mountPath: /etc/tls/certs/default + name: redpanda-default-cert + - mountPath: /etc/tls/certs/external + name: redpanda-external-cert + - mountPath: /etc/redpanda + name: config + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + initContainers: + - command: + - /bin/bash + - -c + - rpk redpanda tune all + image: redpandadata/redpanda:v25.2.1 + name: tuning + resources: {} + securityContext: + capabilities: + add: + - SYS_RESOURCE + privileged: true + runAsGroup: 0 + runAsUser: 0 + volumeMounts: + - mountPath: /etc/tls/certs/default + name: redpanda-default-cert + - mountPath: /etc/tls/certs/external + name: redpanda-external-cert + - mountPath: /etc/redpanda + name: base-config + - command: + - /bin/bash + - -c + - trap "exit 0" TERM; exec $CONFIGURATOR_SCRIPT "${SERVICE_NAME}" "${KUBERNETES_NODE_NAME}" + & wait $! 
+ env: + - name: CONFIGURATOR_SCRIPT + value: /etc/secrets/configurator/scripts/configurator.sh + - name: SERVICE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HOST_IP_ADDRESS + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + image: redpandadata/redpanda:v25.2.1 + name: redpanda-configurator + resources: {} + volumeMounts: + - mountPath: /etc/tls/certs/default + name: redpanda-default-cert + - mountPath: /etc/tls/certs/external + name: redpanda-external-cert + - mountPath: /etc/redpanda + name: config + - mountPath: /tmp/base-config + name: base-config + - mountPath: /etc/secrets/configurator/scripts/ + name: console-disabled-configurator + - command: + - /redpanda-operator + - bootstrap + - --in-dir + - /tmp/base-config + - --out-dir + - /tmp/config + image: localhost/redpanda-operator:dev + name: bootstrap-yaml-envsubst + resources: + limits: + cpu: 100m + memory: 125Mi + requests: + cpu: 100m + memory: 125Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + volumeMounts: + - mountPath: /tmp/config/ + name: config + - mountPath: /tmp/base-config/ + name: base-config + securityContext: + fsGroup: 101 + fsGroupChangePolicy: OnRootMismatch + runAsUser: 101 + serviceAccountName: console-disabled + terminationGracePeriodSeconds: 90 + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/component: redpanda-statefulset + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/name: redpanda + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + volumes: + - name: redpanda-default-cert + secret: + defaultMode: 288 + secretName: console-disabled-default-cert + - name: redpanda-external-cert + secret: + defaultMode: 288 + secretName: console-disabled-external-cert + - name: lifecycle-scripts + secret: + defaultMode: 509 + secretName: console-disabled-sts-lifecycle + - configMap: + name: console-disabled + name: base-config + - emptyDir: {} + name: config + - name: console-disabled-configurator + secret: + defaultMode: 509 + secretName: console-disabled-configurator + - name: datadir + persistentVolumeClaim: + claimName: datadir + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/name: redpanda + name: datadir + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + status: {} + status: + availableReplicas: 0 + replicas: 0 -- nodepool-basic-test -- - apiVersion: apps/v1 kind: StatefulSet diff --git a/operator/internal/lifecycle/testdata/cases.resources.golden.txtar b/operator/internal/lifecycle/testdata/cases.resources.golden.txtar index b074f8e8a..bb6ce28f3 100644 --- a/operator/internal/lifecycle/testdata/cases.resources.golden.txtar +++ b/operator/internal/lifecycle/testdata/cases.resources.golden.txtar @@ -994,6 +994,20 @@ path: secrets/basic-test-default-cert/ca.crt name: basic-test-default-cert status: {} +- metadata: + 
creationTimestamp: null + labels: + cluster.redpanda.com/namespace: basic-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: basic-test + helm.toolkit.fluxcd.io/name: basic-test + helm.toolkit.fluxcd.io/namespace: basic-test + spec: + cluster: + clusterRef: + name: basic-test + secret: {} + status: {} -- compat-test -- - apiVersion: v1 kind: Service @@ -2429,24 +2443,38 @@ path: secrets/compat-test-default-cert/ca.crt name: compat-test-default-cert status: {} --- nodepool-basic-test -- +- metadata: + creationTimestamp: null + labels: + cluster.redpanda.com/namespace: compat-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: compat-test + helm.toolkit.fluxcd.io/name: compat-test + helm.toolkit.fluxcd.io/namespace: compat-test + spec: + cluster: + clusterRef: + name: compat-test + secret: {} + status: {} +-- console-disabled -- - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: app.kubernetes.io/component: redpanda - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: redpanda - cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/namespace: console-disabled cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: nodepool-basic-test + cluster.redpanda.com/owner: console-disabled helm.sh/chart: redpanda-25.1.1-beta3 - helm.toolkit.fluxcd.io/name: nodepool-basic-test - helm.toolkit.fluxcd.io/namespace: nodepool-basic-test - name: nodepool-basic-test-external - namespace: nodepool-basic-test + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-external + namespace: console-disabled spec: externalTrafficPolicy: Local ports: @@ -2472,7 +2500,7 @@ targetPort: 0 publishNotReadyAddresses: true selector: - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/name: redpanda sessionAffinity: None type: NodePort @@ -2484,24 +2512,24 @@ creationTimestamp: null labels: app.kubernetes.io/component: redpanda - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: redpanda - cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/namespace: console-disabled cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: nodepool-basic-test + cluster.redpanda.com/owner: console-disabled helm.sh/chart: redpanda-25.1.1-beta3 - helm.toolkit.fluxcd.io/name: nodepool-basic-test - helm.toolkit.fluxcd.io/namespace: nodepool-basic-test - name: nodepool-basic-test - namespace: nodepool-basic-test + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled + namespace: console-disabled spec: maxUnavailable: 1 selector: matchLabels: - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/name: redpanda - redpanda.com/poddisruptionbudget: nodepool-basic-test + redpanda.com/poddisruptionbudget: console-disabled status: currentHealthy: 0 desiredHealthy: 0 @@ -2514,35 +2542,35 @@ creationTimestamp: null labels: app.kubernetes.io/component: redpanda - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: redpanda - cluster.redpanda.com/namespace: nodepool-basic-test 
+ cluster.redpanda.com/namespace: console-disabled cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: nodepool-basic-test + cluster.redpanda.com/owner: console-disabled helm.sh/chart: redpanda-25.1.1-beta3 - helm.toolkit.fluxcd.io/name: nodepool-basic-test - helm.toolkit.fluxcd.io/namespace: nodepool-basic-test - name: nodepool-basic-test - namespace: nodepool-basic-test + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled + namespace: console-disabled - apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: app.kubernetes.io/component: redpanda - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: redpanda - cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/namespace: console-disabled cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: nodepool-basic-test + cluster.redpanda.com/owner: console-disabled helm.sh/chart: redpanda-25.1.1-beta3 - helm.toolkit.fluxcd.io/name: nodepool-basic-test - helm.toolkit.fluxcd.io/namespace: nodepool-basic-test + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled monitoring.redpanda.com/enabled: "false" - name: nodepool-basic-test - namespace: nodepool-basic-test + name: console-disabled + namespace: console-disabled spec: clusterIP: None ports: @@ -2568,7 +2596,7 @@ targetPort: 8081 publishNotReadyAddresses: true selector: - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/name: redpanda type: ClusterIP status: @@ -2606,15 +2634,11 @@ require_client_auth: false truststore_file: /etc/tls/certs/default/ca.crt brokers: - - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + - address: console-disabled-0.console-disabled.console-disabled.svc.cluster.local. port: 9093 - - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + - address: console-disabled-1.console-disabled.console-disabled.svc.cluster.local. port: 9093 - - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + - address: console-disabled-2.console-disabled.console-disabled.svc.cluster.local. port: 9093 redpanda: admin: @@ -2670,19 +2694,13 @@ truststore_file: /etc/tls/certs/default/ca.crt seed_servers: - host: - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 33145 - - host: - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 33145 - - host: - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + address: console-disabled-0.console-disabled.console-disabled.svc.cluster.local. port: 33145 - host: - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + address: console-disabled-1.console-disabled.console-disabled.svc.cluster.local. port: 33145 - host: - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + address: console-disabled-2.console-disabled.console-disabled.svc.cluster.local. 
port: 33145 rpk: additional_start_flags: @@ -2692,31 +2710,25 @@ - --smp=1 admin_api: addresses: - - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - console-disabled-0.console-disabled.console-disabled.svc.cluster.local.:9644 + - console-disabled-1.console-disabled.console-disabled.svc.cluster.local.:9644 + - console-disabled-2.console-disabled.console-disabled.svc.cluster.local.:9644 tls: ca_file: /etc/tls/certs/default/ca.crt enable_memory_locking: false kafka_api: brokers: - - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - console-disabled-0.console-disabled.console-disabled.svc.cluster.local.:9093 + - console-disabled-1.console-disabled.console-disabled.svc.cluster.local.:9093 + - console-disabled-2.console-disabled.console-disabled.svc.cluster.local.:9093 tls: ca_file: /etc/tls/certs/default/ca.crt overprovisioned: false schema_registry: addresses: - - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - console-disabled-0.console-disabled.console-disabled.svc.cluster.local.:8081 + - console-disabled-1.console-disabled.console-disabled.svc.cluster.local.:8081 + - console-disabled-2.console-disabled.console-disabled.svc.cluster.local.:8081 tls: ca_file: /etc/tls/certs/default/ca.crt tune_aio_events: true @@ -2747,187 +2759,1079 @@ require_client_auth: false truststore_file: /etc/tls/certs/default/ca.crt brokers: - - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + - address: console-disabled-0.console-disabled.console-disabled.svc.cluster.local. port: 9093 - - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + - address: console-disabled-1.console-disabled.console-disabled.svc.cluster.local. port: 9093 - - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + - address: console-disabled-2.console-disabled.console-disabled.svc.cluster.local. 
port: 9093 kind: ConfigMap metadata: creationTimestamp: null labels: app.kubernetes.io/component: redpanda - app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/instance: console-disabled app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: redpanda - cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/namespace: console-disabled cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: nodepool-basic-test + cluster.redpanda.com/owner: console-disabled helm.sh/chart: redpanda-25.1.1-beta3 - helm.toolkit.fluxcd.io/name: nodepool-basic-test - helm.toolkit.fluxcd.io/namespace: nodepool-basic-test - name: nodepool-basic-test - namespace: nodepool-basic-test + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled + namespace: console-disabled - apiVersion: v1 data: - .bootstrap.json.in: '{"audit_enabled":"false","cloud_storage_cache_size":"5368709120","cloud_storage_enable_remote_read":"true","cloud_storage_enable_remote_write":"true","cloud_storage_enabled":"false","compacted_log_segment_size":"67108864","enable_rack_awareness":"false","enable_sasl":"false","kafka_connection_rate_limit":"1000","kafka_enable_authorization":"false","log_segment_size_max":"268435456","log_segment_size_min":"16777216","max_compacted_log_segment_size":"536870912","storage_min_free_bytes":"1073741824"}' - bootstrap.yaml.fixups: '[]' - redpanda.yaml: |- - config_file: /etc/redpanda/redpanda.yaml - pandaproxy: - pandaproxy_api: - - address: 0.0.0.0 - name: internal - port: 8082 - - address: 0.0.0.0 - name: default - port: 8083 - pandaproxy_api_tls: - - cert_file: /etc/tls/certs/default/tls.crt - enabled: true - key_file: /etc/tls/certs/default/tls.key - name: internal - require_client_auth: false - truststore_file: /etc/tls/certs/default/ca.crt - - cert_file: /etc/tls/certs/external/tls.crt - enabled: true - key_file: /etc/tls/certs/external/tls.key - name: default - require_client_auth: false - truststore_file: /etc/tls/certs/external/ca.crt - pandaproxy_client: - broker_tls: - enabled: true - require_client_auth: false - truststore_file: /etc/tls/certs/default/ca.crt + profile: |- + admin_api: + addresses: + - console-disabled-0:31644 + - console-disabled-1:31644 + - console-disabled-2:31644 + tls: + ca_file: ca.crt + kafka_api: brokers: - - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 9093 - - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. 
- port: 9093 - redpanda: - admin: - - address: 0.0.0.0 - name: internal - port: 9644 - - address: 0.0.0.0 - name: default - port: 9645 - admin_api_tls: - - cert_file: /etc/tls/certs/default/tls.crt - enabled: true - key_file: /etc/tls/certs/default/tls.key - name: internal - require_client_auth: false - truststore_file: /etc/tls/certs/default/ca.crt - - cert_file: /etc/tls/certs/external/tls.crt - enabled: true - key_file: /etc/tls/certs/external/tls.key - name: default - require_client_auth: false - truststore_file: /etc/tls/certs/external/ca.crt - crash_loop_limit: 5 - empty_seed_starts_cluster: false - kafka_api: - - address: 0.0.0.0 - name: internal - port: 9093 - - address: 0.0.0.0 - name: default - port: 9094 - kafka_api_tls: - - cert_file: /etc/tls/certs/default/tls.crt - enabled: true - key_file: /etc/tls/certs/default/tls.key - name: internal - require_client_auth: false - truststore_file: /etc/tls/certs/default/ca.crt - - cert_file: /etc/tls/certs/external/tls.crt - enabled: true - key_file: /etc/tls/certs/external/tls.key - name: default - require_client_auth: false - truststore_file: /etc/tls/certs/external/ca.crt - rpc_server: - address: 0.0.0.0 - port: 33145 - rpc_server_tls: - cert_file: /etc/tls/certs/default/tls.crt - enabled: true - key_file: /etc/tls/certs/default/tls.key - require_client_auth: false - truststore_file: /etc/tls/certs/default/ca.crt - seed_servers: - - host: - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 33145 - - host: - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 33145 - - host: - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 33145 - - host: - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. - port: 33145 - - host: - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. 
- port: 33145 - rpk: - additional_start_flags: - - --default-log-level=info - - --memory=2048M - - --reserve-memory=205M - - --smp=1 - admin_api: - addresses: - - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - tls: - ca_file: /etc/tls/certs/default/ca.crt - enable_memory_locking: false - kafka_api: - brokers: - - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - tls: - ca_file: /etc/tls/certs/default/ca.crt - overprovisioned: false - schema_registry: - addresses: - - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - tls: - ca_file: /etc/tls/certs/default/ca.crt - tune_aio_events: true + - console-disabled-0:31092 + - console-disabled-1:31092 + - console-disabled-2:31092 + tls: + ca_file: ca.crt + name: default schema_registry: - schema_registry_api: - - address: 0.0.0.0 - name: internal - port: 8081 - - address: 0.0.0.0 - name: default - port: 8084 + addresses: + - console-disabled-0:30081 + - console-disabled-1:30081 + - console-disabled-2:30081 + tls: + ca_file: ca.crt + kind: ConfigMap + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-rpk + namespace: console-disabled +- apiVersion: cert-manager.io/v1 + kind: Issuer + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-default-selfsigned-issuer + namespace: console-disabled + spec: + selfSigned: {} + status: {} +- apiVersion: cert-manager.io/v1 + kind: Issuer + metadata: + creationTimestamp: null + labels: + 
app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-default-root-issuer + namespace: console-disabled + spec: + ca: + secretName: console-disabled-default-root-certificate + status: {} +- apiVersion: cert-manager.io/v1 + kind: Issuer + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-external-selfsigned-issuer + namespace: console-disabled + spec: + selfSigned: {} + status: {} +- apiVersion: cert-manager.io/v1 + kind: Issuer + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-external-root-issuer + namespace: console-disabled + spec: + ca: + secretName: console-disabled-external-root-certificate + status: {} +- apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-default-root-certificate + namespace: console-disabled + spec: + commonName: console-disabled-default-root-certificate + duration: 43800h0m0s + isCA: true + issuerRef: + group: cert-manager.io + kind: Issuer + name: console-disabled-default-selfsigned-issuer + privateKey: + algorithm: ECDSA + size: 256 + secretName: console-disabled-default-root-certificate + status: {} +- apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-external-root-certificate + namespace: console-disabled + spec: + commonName: console-disabled-external-root-certificate + duration: 
43800h0m0s + isCA: true + issuerRef: + group: cert-manager.io + kind: Issuer + name: console-disabled-external-selfsigned-issuer + privateKey: + algorithm: ECDSA + size: 256 + secretName: console-disabled-external-root-certificate + status: {} +- apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-default-cert + namespace: console-disabled + spec: + dnsNames: + - console-disabled-cluster.console-disabled.console-disabled.svc.cluster.local + - console-disabled-cluster.console-disabled.console-disabled.svc + - console-disabled-cluster.console-disabled.console-disabled + - '*.console-disabled-cluster.console-disabled.console-disabled.svc.cluster.local' + - '*.console-disabled-cluster.console-disabled.console-disabled.svc' + - '*.console-disabled-cluster.console-disabled.console-disabled' + - console-disabled.console-disabled.svc.cluster.local + - console-disabled.console-disabled.svc + - console-disabled.console-disabled + - '*.console-disabled.console-disabled.svc.cluster.local' + - '*.console-disabled.console-disabled.svc' + - '*.console-disabled.console-disabled' + duration: 43800h0m0s + issuerRef: + group: cert-manager.io + kind: Issuer + name: console-disabled-default-root-issuer + privateKey: + algorithm: ECDSA + size: 256 + secretName: console-disabled-default-cert + status: {} +- apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-external-cert + namespace: console-disabled + spec: + dnsNames: + - console-disabled-cluster.console-disabled.console-disabled.svc.cluster.local + - console-disabled-cluster.console-disabled.console-disabled.svc + - console-disabled-cluster.console-disabled.console-disabled + - '*.console-disabled-cluster.console-disabled.console-disabled.svc.cluster.local' + - '*.console-disabled-cluster.console-disabled.console-disabled.svc' + - '*.console-disabled-cluster.console-disabled.console-disabled' + - console-disabled.console-disabled.svc.cluster.local + - console-disabled.console-disabled.svc + - console-disabled.console-disabled + - '*.console-disabled.console-disabled.svc.cluster.local' + - '*.console-disabled.console-disabled.svc' + - '*.console-disabled.console-disabled' + duration: 43800h0m0s + issuerRef: + group: cert-manager.io + kind: Issuer + name: console-disabled-external-root-issuer + privateKey: + algorithm: ECDSA + size: 256 + secretName: console-disabled-external-cert + status: {} +- apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: 
console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-rpk-debug-bundle + namespace: console-disabled + rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - limitranges + - persistentvolumeclaims + - pods + - pods/log + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: + - get + - list +- apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-sidecar + namespace: console-disabled + rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-rpk-debug-bundle + namespace: console-disabled + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: console-disabled-rpk-debug-bundle + subjects: + - kind: ServiceAccount + name: console-disabled + namespace: console-disabled +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-sidecar + namespace: console-disabled + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: console-disabled-sidecar + subjects: + - kind: ServiceAccount + name: console-disabled + namespace: console-disabled +- apiVersion: v1 + kind: Secret + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: 
console-disabled + name: console-disabled-sts-lifecycle + namespace: console-disabled + stringData: + common.sh: |- + #!/usr/bin/env bash + + # the SERVICE_NAME comes from the metadata.name of the pod, essentially the POD_NAME + CURL_URL="https://${SERVICE_NAME}.console-disabled.console-disabled.svc.cluster.local:9644" + + # commands used throughout + CURL_NODE_ID_CMD="curl --silent --fail --cacert /etc/tls/certs/default/ca.crt ${CURL_URL}/v1/node_config" + + CURL_MAINTENANCE_DELETE_CMD_PREFIX='curl -X DELETE --silent -o /dev/null -w "%{http_code}"' + CURL_MAINTENANCE_PUT_CMD_PREFIX='curl -X PUT --silent -o /dev/null -w "%{http_code}"' + CURL_MAINTENANCE_GET_CMD="curl -X GET --silent --cacert /etc/tls/certs/default/ca.crt ${CURL_URL}/v1/maintenance" + postStart.sh: |- + #!/usr/bin/env bash + # This code should be similar if not exactly the same as that found in the panda-operator, see + # https://github.com/redpanda-data/redpanda/blob/e51d5b7f2ef76d5160ca01b8c7a8cf07593d29b6/src/go/k8s/pkg/resources/secret.go + + # path below should match the path defined on the statefulset + source /var/lifecycle/common.sh + + postStartHook () { + set -x + + touch /tmp/postStartHookStarted + + until NODE_ID=$(${CURL_NODE_ID_CMD} | grep -o '\"node_id\":[^,}]*' | grep -o '[^: ]*$'); do + sleep 0.5 + done + + echo "Clearing maintenance mode on node ${NODE_ID}" + CURL_MAINTENANCE_DELETE_CMD="${CURL_MAINTENANCE_DELETE_CMD_PREFIX} --cacert /etc/tls/certs/default/ca.crt ${CURL_URL}/v1/brokers/${NODE_ID}/maintenance" + # a 400 here would mean not in maintenance mode + until [ "${status:-}" = '"200"' ] || [ "${status:-}" = '"400"' ]; do + status=$(${CURL_MAINTENANCE_DELETE_CMD}) + sleep 0.5 + done + + touch /tmp/postStartHookFinished + } + + postStartHook + true + preStop.sh: |- + #!/usr/bin/env bash + # This code should be similar if not exactly the same as that found in the panda-operator, see + # https://github.com/redpanda-data/redpanda/blob/e51d5b7f2ef76d5160ca01b8c7a8cf07593d29b6/src/go/k8s/pkg/resources/secret.go + + touch /tmp/preStopHookStarted + + # path below should match the path defined on the statefulset + source /var/lifecycle/common.sh + + set -x + + preStopHook () { + until NODE_ID=$(${CURL_NODE_ID_CMD} | grep -o '\"node_id\":[^,}]*' | grep -o '[^: ]*$'); do + sleep 0.5 + done + + echo "Setting maintenance mode on node ${NODE_ID}" + CURL_MAINTENANCE_PUT_CMD="${CURL_MAINTENANCE_PUT_CMD_PREFIX} --cacert /etc/tls/certs/default/ca.crt ${CURL_URL}/v1/brokers/${NODE_ID}/maintenance" + until [ "${status:-}" = '"200"' ]; do + status=$(${CURL_MAINTENANCE_PUT_CMD}) + sleep 0.5 + done + + until [ "${finished:-}" = "true" ] || [ "${draining:-}" = "false" ]; do + res=$(${CURL_MAINTENANCE_GET_CMD}) + finished=$(echo $res | grep -o '\"finished\":[^,}]*' | grep -o '[^: ]*$') + draining=$(echo $res | grep -o '\"draining\":[^,}]*' | grep -o '[^: ]*$') + sleep 0.5 + done + + touch /tmp/preStopHookFinished + } + preStopHook + true + type: Opaque +- apiVersion: v1 + kind: Secret + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: console-disabled + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + name: console-disabled-configurator + namespace: console-disabled 
+ stringData: + configurator.sh: |- + set -xe + SERVICE_NAME=$1 + KUBERNETES_NODE_NAME=$2 + POD_ORDINAL=${SERVICE_NAME##*-} + BROKER_INDEX=`expr $POD_ORDINAL + 1` + + CONFIG=/etc/redpanda/redpanda.yaml + + # Setup config files + cp /tmp/base-config/redpanda.yaml "${CONFIG}" + + LISTENER="{\"address\":\"${SERVICE_NAME}.console-disabled.console-disabled.svc.cluster.local.\",\"name\":\"internal\",\"port\":9093}" + rpk redpanda config --config "$CONFIG" set redpanda.advertised_kafka_api[0] "$LISTENER" + + ADVERTISED_KAFKA_ADDRESSES=() + + PREFIX_TEMPLATE="" + ADVERTISED_KAFKA_ADDRESSES+=("{\"address\":\"${SERVICE_NAME}\",\"name\":\"default\",\"port\":31092}") + + PREFIX_TEMPLATE="" + ADVERTISED_KAFKA_ADDRESSES+=("{\"address\":\"${SERVICE_NAME}\",\"name\":\"default\",\"port\":31092}") + + PREFIX_TEMPLATE="" + ADVERTISED_KAFKA_ADDRESSES+=("{\"address\":\"${SERVICE_NAME}\",\"name\":\"default\",\"port\":31092}") + + rpk redpanda config --config "$CONFIG" set redpanda.advertised_kafka_api[1] "${ADVERTISED_KAFKA_ADDRESSES[$POD_ORDINAL]}" + + LISTENER="{\"address\":\"${SERVICE_NAME}.console-disabled.console-disabled.svc.cluster.local.\",\"name\":\"internal\",\"port\":8082}" + rpk redpanda config --config "$CONFIG" set pandaproxy.advertised_pandaproxy_api[0] "$LISTENER" + + ADVERTISED_HTTP_ADDRESSES=() + + PREFIX_TEMPLATE="" + ADVERTISED_HTTP_ADDRESSES+=("{\"address\":\"${SERVICE_NAME}\",\"name\":\"default\",\"port\":30082}") + + PREFIX_TEMPLATE="" + ADVERTISED_HTTP_ADDRESSES+=("{\"address\":\"${SERVICE_NAME}\",\"name\":\"default\",\"port\":30082}") + + PREFIX_TEMPLATE="" + ADVERTISED_HTTP_ADDRESSES+=("{\"address\":\"${SERVICE_NAME}\",\"name\":\"default\",\"port\":30082}") + + rpk redpanda config --config "$CONFIG" set pandaproxy.advertised_pandaproxy_api[1] "${ADVERTISED_HTTP_ADDRESSES[$POD_ORDINAL]}" + type: Opaque +- metadata: + creationTimestamp: null + labels: + cluster.redpanda.com/namespace: console-disabled + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: console-disabled + helm.toolkit.fluxcd.io/name: console-disabled + helm.toolkit.fluxcd.io/namespace: console-disabled + spec: + cluster: + clusterRef: + name: console-disabled + secret: {} + status: {} +-- nodepool-basic-test -- +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: nodepool-basic-test + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: nodepool-basic-test + helm.toolkit.fluxcd.io/namespace: nodepool-basic-test + name: nodepool-basic-test-external + namespace: nodepool-basic-test + spec: + externalTrafficPolicy: Local + ports: + - name: admin-default + nodePort: 31644 + port: 9645 + protocol: TCP + targetPort: 0 + - name: kafka-default + nodePort: 31092 + port: 9094 + protocol: TCP + targetPort: 0 + - name: http-default + nodePort: 30082 + port: 8083 + protocol: TCP + targetPort: 0 + - name: schema-default + nodePort: 30081 + port: 8084 + protocol: TCP + targetPort: 0 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/name: redpanda + sessionAffinity: None + type: NodePort + status: + loadBalancer: {} +- apiVersion: policy/v1 + kind: PodDisruptionBudget + metadata: + creationTimestamp: null + labels: + 
app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: nodepool-basic-test + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: nodepool-basic-test + helm.toolkit.fluxcd.io/namespace: nodepool-basic-test + name: nodepool-basic-test + namespace: nodepool-basic-test + spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/name: redpanda + redpanda.com/poddisruptionbudget: nodepool-basic-test + status: + currentHealthy: 0 + desiredHealthy: 0 + disruptionsAllowed: 0 + expectedPods: 0 +- apiVersion: v1 + automountServiceAccountToken: false + kind: ServiceAccount + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: nodepool-basic-test + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: nodepool-basic-test + helm.toolkit.fluxcd.io/namespace: nodepool-basic-test + name: nodepool-basic-test + namespace: nodepool-basic-test +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: nodepool-basic-test + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: nodepool-basic-test + helm.toolkit.fluxcd.io/namespace: nodepool-basic-test + monitoring.redpanda.com/enabled: "false" + name: nodepool-basic-test + namespace: nodepool-basic-test + spec: + clusterIP: None + ports: + - name: admin + port: 9644 + protocol: TCP + targetPort: 9644 + - name: http + port: 8082 + protocol: TCP + targetPort: 8082 + - name: kafka + port: 9093 + protocol: TCP + targetPort: 9093 + - name: rpc + port: 33145 + protocol: TCP + targetPort: 33145 + - name: schemaregistry + port: 8081 + protocol: TCP + targetPort: 8081 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/name: redpanda + type: ClusterIP + status: + loadBalancer: {} +- apiVersion: v1 + data: + .bootstrap.json.in: '{"audit_enabled":"false","cloud_storage_cache_size":"5368709120","cloud_storage_enable_remote_read":"true","cloud_storage_enable_remote_write":"true","cloud_storage_enabled":"false","compacted_log_segment_size":"67108864","default_topic_replications":"3","enable_rack_awareness":"false","enable_sasl":"false","kafka_connection_rate_limit":"1000","kafka_enable_authorization":"false","log_segment_size_max":"268435456","log_segment_size_min":"16777216","max_compacted_log_segment_size":"536870912","storage_min_free_bytes":"1073741824"}' + bootstrap.yaml.fixups: '[]' + redpanda.yaml: |- + config_file: /etc/redpanda/redpanda.yaml + pandaproxy: + pandaproxy_api: + - address: 0.0.0.0 + name: internal + port: 8082 + - address: 0.0.0.0 + name: default + port: 8083 + pandaproxy_api_tls: + - cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + name: 
internal + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + - cert_file: /etc/tls/certs/external/tls.crt + enabled: true + key_file: /etc/tls/certs/external/tls.key + name: default + require_client_auth: false + truststore_file: /etc/tls/certs/external/ca.crt + pandaproxy_client: + broker_tls: + enabled: true + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + brokers: + - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + redpanda: + admin: + - address: 0.0.0.0 + name: internal + port: 9644 + - address: 0.0.0.0 + name: default + port: 9645 + admin_api_tls: + - cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + name: internal + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + - cert_file: /etc/tls/certs/external/tls.crt + enabled: true + key_file: /etc/tls/certs/external/tls.key + name: default + require_client_auth: false + truststore_file: /etc/tls/certs/external/ca.crt + crash_loop_limit: 5 + empty_seed_starts_cluster: false + kafka_api: + - address: 0.0.0.0 + name: internal + port: 9093 + - address: 0.0.0.0 + name: default + port: 9094 + kafka_api_tls: + - cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + name: internal + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + - cert_file: /etc/tls/certs/external/tls.crt + enabled: true + key_file: /etc/tls/certs/external/tls.key + name: default + require_client_auth: false + truststore_file: /etc/tls/certs/external/ca.crt + rpc_server: + address: 0.0.0.0 + port: 33145 + rpc_server_tls: + cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + seed_servers: + - host: + address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. 
+ port: 33145 + rpk: + additional_start_flags: + - --default-log-level=info + - --memory=2048M + - --reserve-memory=205M + - --smp=1 + admin_api: + addresses: + - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + tls: + ca_file: /etc/tls/certs/default/ca.crt + enable_memory_locking: false + kafka_api: + brokers: + - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + tls: + ca_file: /etc/tls/certs/default/ca.crt + overprovisioned: false + schema_registry: + addresses: + - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + tls: + ca_file: /etc/tls/certs/default/ca.crt + tune_aio_events: true + schema_registry: + schema_registry_api: + - address: 0.0.0.0 + name: internal + port: 8081 + - address: 0.0.0.0 + name: default + port: 8084 + schema_registry_api_tls: + - cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + name: internal + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + - cert_file: /etc/tls/certs/external/tls.crt + enabled: true + key_file: /etc/tls/certs/external/tls.key + name: default + require_client_auth: false + truststore_file: /etc/tls/certs/external/ca.crt + schema_registry_client: + broker_tls: + enabled: true + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + brokers: + - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. 
+ port: 9093 + kind: ConfigMap + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: nodepool-basic-test + helm.sh/chart: redpanda-25.1.1-beta3 + helm.toolkit.fluxcd.io/name: nodepool-basic-test + helm.toolkit.fluxcd.io/namespace: nodepool-basic-test + name: nodepool-basic-test + namespace: nodepool-basic-test +- apiVersion: v1 + data: + .bootstrap.json.in: '{"audit_enabled":"false","cloud_storage_cache_size":"5368709120","cloud_storage_enable_remote_read":"true","cloud_storage_enable_remote_write":"true","cloud_storage_enabled":"false","compacted_log_segment_size":"67108864","enable_rack_awareness":"false","enable_sasl":"false","kafka_connection_rate_limit":"1000","kafka_enable_authorization":"false","log_segment_size_max":"268435456","log_segment_size_min":"16777216","max_compacted_log_segment_size":"536870912","storage_min_free_bytes":"1073741824"}' + bootstrap.yaml.fixups: '[]' + redpanda.yaml: |- + config_file: /etc/redpanda/redpanda.yaml + pandaproxy: + pandaproxy_api: + - address: 0.0.0.0 + name: internal + port: 8082 + - address: 0.0.0.0 + name: default + port: 8083 + pandaproxy_api_tls: + - cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + name: internal + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + - cert_file: /etc/tls/certs/external/tls.crt + enabled: true + key_file: /etc/tls/certs/external/tls.key + name: default + require_client_auth: false + truststore_file: /etc/tls/certs/external/ca.crt + pandaproxy_client: + broker_tls: + enabled: true + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + brokers: + - address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 9093 + - address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. 
+ port: 9093 + redpanda: + admin: + - address: 0.0.0.0 + name: internal + port: 9644 + - address: 0.0.0.0 + name: default + port: 9645 + admin_api_tls: + - cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + name: internal + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + - cert_file: /etc/tls/certs/external/tls.crt + enabled: true + key_file: /etc/tls/certs/external/tls.key + name: default + require_client_auth: false + truststore_file: /etc/tls/certs/external/ca.crt + crash_loop_limit: 5 + empty_seed_starts_cluster: false + kafka_api: + - address: 0.0.0.0 + name: internal + port: 9093 + - address: 0.0.0.0 + name: default + port: 9094 + kafka_api_tls: + - cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + name: internal + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + - cert_file: /etc/tls/certs/external/tls.crt + enabled: true + key_file: /etc/tls/certs/external/tls.key + name: default + require_client_auth: false + truststore_file: /etc/tls/certs/external/ca.crt + rpc_server: + address: 0.0.0.0 + port: 33145 + rpc_server_tls: + cert_file: /etc/tls/certs/default/tls.crt + enabled: true + key_file: /etc/tls/certs/default/tls.key + require_client_auth: false + truststore_file: /etc/tls/certs/default/ca.crt + seed_servers: + - host: + address: nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. + port: 33145 + - host: + address: nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local. 
+ port: 33145 + rpk: + additional_start_flags: + - --default-log-level=info + - --memory=2048M + - --reserve-memory=205M + - --smp=1 + admin_api: + addresses: + - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 + tls: + ca_file: /etc/tls/certs/default/ca.crt + enable_memory_locking: false + kafka_api: + brokers: + - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 + tls: + ca_file: /etc/tls/certs/default/ca.crt + overprovisioned: false + schema_registry: + addresses: + - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 + tls: + ca_file: /etc/tls/certs/default/ca.crt + tune_aio_events: true + schema_registry: + schema_registry_api: + - address: 0.0.0.0 + name: internal + port: 8081 + - address: 0.0.0.0 + name: default + port: 8084 schema_registry_api_tls: - cert_file: /etc/tls/certs/default/tls.crt enabled: true @@ -3949,3 +4853,17 @@ path: secrets/nodepool-basic-test-default-cert/ca.crt name: nodepool-basic-test-default-cert status: {} +- metadata: + creationTimestamp: null + labels: + cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/operator: v2 + cluster.redpanda.com/owner: nodepool-basic-test + helm.toolkit.fluxcd.io/name: nodepool-basic-test + helm.toolkit.fluxcd.io/namespace: nodepool-basic-test + spec: + cluster: + clusterRef: + name: nodepool-basic-test + secret: {} + status: {} diff --git a/operator/internal/lifecycle/testdata/cases.txtar b/operator/internal/lifecycle/testdata/cases.txtar index b26511203..3652a8c3b 100644 --- a/operator/internal/lifecycle/testdata/cases.txtar +++ b/operator/internal/lifecycle/testdata/cases.txtar @@ -1,4 +1,13 @@ -- basic-test -- +-- console-disabled -- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Redpanda +metadata: + name: compat-test +spec: + clusterSpec: + console: + enabled: false -- nodepool-basic-test -- --- apiVersion: cluster.redpanda.com/v1alpha2 @@ -156,4 +165,4 @@ spec: memory: 256Mi limits: cpu: 100m - memory: 256Mi \ No newline at end of file + memory: 256Mi diff --git a/operator/internal/lifecycle/testdata/cases.values.golden.txtar b/operator/internal/lifecycle/testdata/cases.values.golden.txtar index fa97b0b83..e40213373 100644 --- a/operator/internal/lifecycle/testdata/cases.values.golden.txtar +++ b/operator/internal/lifecycle/testdata/cases.values.golden.txtar @@ -1018,6 +1018,372 @@ 
values: secretRef: null enabled: true tuning: {} +-- console-disabled -- +pools: [] +values: + auditLogging: + clientMaxBufferSize: 16777216 + enabled: false + enabledEventTypes: null + excludedPrincipals: null + excludedTopics: null + listener: internal + partitions: 12 + queueDrainIntervalMs: 500 + queueMaxBufferSizePerShard: 1048576 + replicationFactor: 0 + auth: + sasl: + bootstrapUser: + mechanism: SCRAM-SHA-256 + name: null + password: null + secretKeyRef: null + enabled: false + mechanism: SCRAM-SHA-512 + secretRef: redpanda-users + users: [] + clusterDomain: cluster.local. + commonLabels: {} + config: + cluster: {} + extraClusterConfiguration: {} + node: + crash_loop_limit: 5 + pandaproxy_client: + consumer_heartbeat_interval_ms: 0 + consumer_rebalance_timeout_ms: 0 + consumer_request_max_bytes: 0 + consumer_request_timeout_ms: 0 + consumer_session_timeout_ms: 0 + produce_batch_delay_ms: 0 + produce_batch_record_count: 0 + produce_batch_size_bytes: 0 + retries: 0 + retry_base_backoff_ms: 0 + rpk: {} + schema_registry_client: + consumer_heartbeat_interval_ms: 0 + consumer_rebalance_timeout_ms: 0 + consumer_request_max_bytes: 0 + consumer_request_timeout_ms: 0 + consumer_session_timeout_ms: 0 + produce_batch_delay_ms: 0 + produce_batch_record_count: 0 + produce_batch_size_bytes: 0 + retries: 0 + retry_base_backoff_ms: 0 + tunable: + compacted_log_segment_size: 67108864 + kafka_connection_rate_limit: 1000 + log_segment_size_max: 268435456 + log_segment_size_min: 16777216 + max_compacted_log_segment_size: 536870912 + console: + configmap: + create: false + deployment: + create: false + enabled: false + secret: + create: false + enterprise: + license: "" + external: + addresses: null + annotations: null + domain: null + enabled: true + externalDns: null + prefixTemplate: "" + service: + enabled: true + sourceRanges: null + type: NodePort + force: false + fullnameOverride: "" + image: + repository: redpandadata/redpanda + tag: v25.2.1 + license_key: "" + listeners: + admin: + enabled: false + external: + default: + advertisedPorts: + - 31644 + enabled: null + nodePort: null + port: 9645 + tls: + cert: external + enabled: null + requireClientAuth: null + trustStore: null + port: 9644 + tls: + cert: default + enabled: null + requireClientAuth: false + trustStore: null + http: + enabled: true + external: + default: + advertisedPorts: + - 30082 + enabled: null + nodePort: null + port: 8083 + tls: + cert: external + enabled: null + requireClientAuth: false + trustStore: null + port: 8082 + tls: + cert: default + enabled: null + requireClientAuth: false + trustStore: null + kafka: + enabled: false + external: + default: + advertisedPorts: + - 31092 + enabled: null + nodePort: null + port: 9094 + tls: + cert: external + enabled: null + requireClientAuth: null + trustStore: null + port: 9093 + tls: + cert: default + enabled: null + requireClientAuth: false + trustStore: null + rpc: + port: 33145 + tls: + cert: default + enabled: null + requireClientAuth: false + trustStore: null + schemaRegistry: + enabled: true + external: + default: + advertisedPorts: + - 30081 + enabled: null + nodePort: null + port: 8084 + tls: + cert: external + enabled: null + requireClientAuth: false + trustStore: null + port: 8081 + tls: + cert: default + enabled: null + requireClientAuth: false + trustStore: null + logging: + logLevel: info + usageStats: + clusterId: null + enabled: true + monitoring: + enableHttp2: null + enabled: false + labels: {} + scrapeInterval: 30s + tlsConfig: null + nameOverride: "" + 
podTemplate: + spec: + securityContext: + fsGroup: 101 + fsGroupChangePolicy: OnRootMismatch + runAsUser: 101 + post_install_job: + annotations: null + enabled: true + labels: null + podTemplate: + spec: + containers: + - name: post-install + securityContext: {} + securityContext: {} + rackAwareness: + enabled: false + nodeAnnotation: topology.kubernetes.io/zone + rbac: + annotations: {} + enabled: true + rpkDebugBundle: true + resources: + cpu: + cores: "1" + overprovisioned: null + memory: + container: + max: 2560Mi + min: null + enable_memory_locking: null + redpanda: null + service: null + serviceAccount: + annotations: {} + create: true + name: "" + statefulset: + additionalRedpandaCmdFlags: [] + additionalSelectorLabels: {} + budget: + maxUnavailable: 1 + initContainerImage: + repository: busybox + tag: latest + initContainers: + configurator: {} + fsValidator: + enabled: false + expectedFS: xfs + setDataDirOwnership: + enabled: false + podAntiAffinity: + custom: {} + topologyKey: kubernetes.io/hostname + type: hard + weight: 100 + podTemplate: + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: '{{ include "redpanda.name" . }}-statefulset' + app.kubernetes.io/instance: '{{ .Release.Name }}' + app.kubernetes.io/name: '{{ include "redpanda.name" . }}' + topologyKey: kubernetes.io/hostname + containers: + - name: redpanda + - name: sidecar + readinessProbe: {} + resources: {} + securityContext: {} + initContainers: + - name: redpanda-configurator + resources: {} + priorityClassName: "" + securityContext: {} + terminationGracePeriodSeconds: 90 + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/component: '{{ include "redpanda.name" . }}-statefulset' + app.kubernetes.io/instance: '{{ .Release.Name }}' + app.kubernetes.io/name: '{{ include "redpanda.name" . 
}}' + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + replicas: 3 + sideCars: + args: + - --no-set-superusers + brokerDecommissioner: + decommissionAfter: 60s + decommissionRequeueTimeout: 10s + enabled: false + configWatcher: + enabled: true + controllers: + createRBAC: true + enabled: false + healthProbeAddress: :8085 + image: + repository: localhost/redpanda-operator + tag: dev + metricsAddress: :9082 + pprofAddress: :9083 + run: + - all + image: + repository: localhost/redpanda-operator + tag: dev + pvcUnbinder: + enabled: false + unbindAfter: 60s + updateStrategy: + type: RollingUpdate + storage: + hostPath: "" + persistentVolume: + annotations: {} + enabled: true + labels: {} + nameOverwrite: "" + size: 20Gi + storageClass: "" + tiered: + config: + cloud_storage_cache_size: 5368709120 + cloud_storage_enable_remote_read: true + cloud_storage_enable_remote_write: true + cloud_storage_enabled: false + credentialsSecretRef: + accessKey: + configurationKey: cloud_storage_access_key + key: "" + name: "" + secretKey: + configurationKey: cloud_storage_secret_key + key: "" + name: "" + hostPath: "" + mountType: none + persistentVolume: + annotations: {} + enabled: false + labels: {} + nameOverwrite: "" + size: "" + storageClass: "" + tieredConfig: null + tieredStorageHostPath: "" + tieredStoragePersistentVolume: null + tests: + enabled: true + tls: + certs: + default: + applyInternalDNSNames: null + caEnabled: true + clientSecretRef: null + duration: "" + enabled: null + issuerRef: null + secretRef: null + external: + applyInternalDNSNames: null + caEnabled: true + clientSecretRef: null + duration: "" + enabled: null + issuerRef: null + secretRef: null + enabled: true + tuning: + tune_aio_events: true -- nodepool-basic-test -- pools: - Generation: "0" diff --git a/operator/internal/lifecycle/v2_simple_resources.go b/operator/internal/lifecycle/v2_simple_resources.go index 239ec2af7..c7fbcd46a 100644 --- a/operator/internal/lifecycle/v2_simple_resources.go +++ b/operator/internal/lifecycle/v2_simple_resources.go @@ -42,7 +42,8 @@ func (m *V2SimpleResourceRenderer) Render(ctx context.Context, cluster *ClusterW spec := cluster.Spec.ClusterSpec.DeepCopy() if spec != nil { - // normalize the spec by removing the connectors stanza which is deprecated + // normalize the spec by removing the connectors and console stanzas which are deprecated + spec.Console = nil spec.Connectors = nil } @@ -58,7 +59,47 @@ func (m *V2SimpleResourceRenderer) Render(ctx context.Context, cluster *ClusterW return nil, err } - return redpanda.RenderResources(state) + resources, err := redpanda.RenderResources(state) + if err != nil { + return nil, err + } + + console, err := m.consoleIntegration(cluster, spec.Console) + if err != nil { + return nil, err + } + + if console != nil { + resources = append(resources, console) + } + + return resources, err +} + +func (m *V2SimpleResourceRenderer) consoleIntegration( + cluster *ClusterWithPools, + console *redpandav1alpha2.RedpandaConsole, +) (*redpandav1alpha2.Console, error) { + values, err := redpandav1alpha2.ConvertConsoleSubchartToConsoleValues(console) + if err != nil { + return nil, err + } + + // Values can be nil if console is disabled. 
+ if values == nil { + return nil, nil + } + + return &redpandav1alpha2.Console{ + Spec: redpandav1alpha2.ConsoleSpec{ + ConsoleValues: *values, + ClusterSource: &redpandav1alpha2.ClusterSource{ + ClusterRef: &redpandav1alpha2.ClusterRef{ + Name: cluster.Name, + }, + }, + }, + }, nil } // WatchedResourceTypes returns the list of all the resources that the cluster diff --git a/operator/pkg/functional/map.go b/operator/pkg/functional/map.go index 3004712d2..312154625 100644 --- a/operator/pkg/functional/map.go +++ b/operator/pkg/functional/map.go @@ -44,7 +44,7 @@ func deepCopy(v any) any { } func deepCopyElements(v []any) []any { - copied := make([]any, len(v)) + copied := make([]any, 0, len(v)) for _, value := range v { switch cast := value.(type) { case map[string]any: From 53cda4053b9786b7a42896072a90e233931d7971 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Tue, 25 Nov 2025 09:33:44 -0500 Subject: [PATCH 02/12] add partial rendering --- acceptance/features/console-upgrades.feature | 0 charts/console/rendervalues.go | 4 +- charts/console/rendervalues_partial.gen.go | 77 +++++++++-------- gen/partial/partial.go | 86 ++++++++++++++++--- go.work.sum | 10 +++ operator/api/redpanda/v1alpha2/conversion.go | 12 +-- .../v1alpha2/zz_generated.conversion.go | 19 +--- 7 files changed, 137 insertions(+), 71 deletions(-) create mode 100644 acceptance/features/console-upgrades.feature diff --git a/acceptance/features/console-upgrades.feature b/acceptance/features/console-upgrades.feature new file mode 100644 index 000000000..e69de29bb diff --git a/charts/console/rendervalues.go b/charts/console/rendervalues.go index e1ea9d89f..8477360fd 100644 --- a/charts/console/rendervalues.go +++ b/charts/console/rendervalues.go @@ -55,8 +55,8 @@ type RenderValues struct { SecretMounts []SecretMount `json:"secretMounts"` Secret SecretConfig `json:"secret"` LicenseSecretRef *corev1.SecretKeySelector `json:"licenseSecretRef,omitempty"` - LivenessProbe corev1.Probe `json:"livenessProbe"` - ReadinessProbe corev1.Probe `json:"readinessProbe"` + LivenessProbe corev1.Probe `json:"livenessProbe" partial:"builtin"` + ReadinessProbe corev1.Probe `json:"readinessProbe" partial:"builtin"` ConfigMap Creatable `json:"configmap"` Deployment DeploymentConfig `json:"deployment"` Strategy appsv1.DeploymentStrategy `json:"strategy"` diff --git a/charts/console/rendervalues_partial.gen.go b/charts/console/rendervalues_partial.gen.go index 2f566460f..313e5b718 100644 --- a/charts/console/rendervalues_partial.gen.go +++ b/charts/console/rendervalues_partial.gen.go @@ -17,47 +17,48 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + applycorev1 "k8s.io/client-go/applyconfigurations/core/v1" ) type PartialRenderValues struct { - ReplicaCount *int32 "json:\"replicaCount,omitempty\"" - NameOverride *string "json:\"nameOverride,omitempty\"" - CommonLabels map[string]string "json:\"commonLabels,omitempty\"" - FullnameOverride *string "json:\"fullnameOverride,omitempty\"" - Image *PartialImage "json:\"image,omitempty\"" - ImagePullSecrets []corev1.LocalObjectReference "json:\"imagePullSecrets,omitempty\"" - AutomountServiceAccountToken *bool "json:\"automountServiceAccountToken,omitempty\"" - ServiceAccount *PartialServiceAccountConfig "json:\"serviceAccount,omitempty\"" - Annotations map[string]string "json:\"annotations,omitempty\"" - PodAnnotations map[string]string "json:\"podAnnotations,omitempty\"" - PodLabels map[string]string "json:\"podLabels,omitempty\"" - 
PodSecurityContext *corev1.PodSecurityContext "json:\"podSecurityContext,omitempty\"" - SecurityContext *corev1.SecurityContext "json:\"securityContext,omitempty\"" - Service *PartialServiceConfig "json:\"service,omitempty\"" - Ingress *PartialIngressConfig "json:\"ingress,omitempty\"" - Resources *corev1.ResourceRequirements "json:\"resources,omitempty\"" - Autoscaling *PartialAutoScaling "json:\"autoscaling,omitempty\"" - NodeSelector map[string]string "json:\"nodeSelector,omitempty\"" - Tolerations []corev1.Toleration "json:\"tolerations,omitempty\"" - Affinity *corev1.Affinity "json:\"affinity,omitempty\"" - TopologySpreadConstraints []corev1.TopologySpreadConstraint "json:\"topologySpreadConstraints,omitempty\"" - PriorityClassName *string "json:\"priorityClassName,omitempty\"" - Config map[string]any "json:\"config,omitempty\"" - ExtraEnv []corev1.EnvVar "json:\"extraEnv,omitempty\"" - ExtraEnvFrom []corev1.EnvFromSource "json:\"extraEnvFrom,omitempty\"" - ExtraVolumes []corev1.Volume "json:\"extraVolumes,omitempty\"" - ExtraVolumeMounts []corev1.VolumeMount "json:\"extraVolumeMounts,omitempty\"" - ExtraContainers []corev1.Container "json:\"extraContainers,omitempty\"" - ExtraContainerPorts []corev1.ContainerPort "json:\"extraContainerPorts,omitempty\"" - InitContainers *PartialInitContainers "json:\"initContainers,omitempty\"" - SecretMounts []PartialSecretMount "json:\"secretMounts,omitempty\"" - Secret *PartialSecretConfig "json:\"secret,omitempty\"" - LicenseSecretRef *corev1.SecretKeySelector "json:\"licenseSecretRef,omitempty\"" - LivenessProbe *corev1.Probe "json:\"livenessProbe,omitempty\"" - ReadinessProbe *corev1.Probe "json:\"readinessProbe,omitempty\"" - ConfigMap *PartialCreatable "json:\"configmap,omitempty\"" - Deployment *PartialDeploymentConfig "json:\"deployment,omitempty\"" - Strategy *appsv1.DeploymentStrategy "json:\"strategy,omitempty\"" + ReplicaCount *int32 "json:\"replicaCount,omitempty\"" + NameOverride *string "json:\"nameOverride,omitempty\"" + CommonLabels map[string]string "json:\"commonLabels,omitempty\"" + FullnameOverride *string "json:\"fullnameOverride,omitempty\"" + Image *PartialImage "json:\"image,omitempty\"" + ImagePullSecrets []corev1.LocalObjectReference "json:\"imagePullSecrets,omitempty\"" + AutomountServiceAccountToken *bool "json:\"automountServiceAccountToken,omitempty\"" + ServiceAccount *PartialServiceAccountConfig "json:\"serviceAccount,omitempty\"" + Annotations map[string]string "json:\"annotations,omitempty\"" + PodAnnotations map[string]string "json:\"podAnnotations,omitempty\"" + PodLabels map[string]string "json:\"podLabels,omitempty\"" + PodSecurityContext *corev1.PodSecurityContext "json:\"podSecurityContext,omitempty\"" + SecurityContext *corev1.SecurityContext "json:\"securityContext,omitempty\"" + Service *PartialServiceConfig "json:\"service,omitempty\"" + Ingress *PartialIngressConfig "json:\"ingress,omitempty\"" + Resources *corev1.ResourceRequirements "json:\"resources,omitempty\"" + Autoscaling *PartialAutoScaling "json:\"autoscaling,omitempty\"" + NodeSelector map[string]string "json:\"nodeSelector,omitempty\"" + Tolerations []corev1.Toleration "json:\"tolerations,omitempty\"" + Affinity *corev1.Affinity "json:\"affinity,omitempty\"" + TopologySpreadConstraints []corev1.TopologySpreadConstraint "json:\"topologySpreadConstraints,omitempty\"" + PriorityClassName *string "json:\"priorityClassName,omitempty\"" + Config map[string]any "json:\"config,omitempty\"" + ExtraEnv []corev1.EnvVar "json:\"extraEnv,omitempty\"" + 
ExtraEnvFrom []corev1.EnvFromSource "json:\"extraEnvFrom,omitempty\"" + ExtraVolumes []corev1.Volume "json:\"extraVolumes,omitempty\"" + ExtraVolumeMounts []corev1.VolumeMount "json:\"extraVolumeMounts,omitempty\"" + ExtraContainers []corev1.Container "json:\"extraContainers,omitempty\"" + ExtraContainerPorts []corev1.ContainerPort "json:\"extraContainerPorts,omitempty\"" + InitContainers *PartialInitContainers "json:\"initContainers,omitempty\"" + SecretMounts []PartialSecretMount "json:\"secretMounts,omitempty\"" + Secret *PartialSecretConfig "json:\"secret,omitempty\"" + LicenseSecretRef *corev1.SecretKeySelector "json:\"licenseSecretRef,omitempty\"" + LivenessProbe *applycorev1.ProbeApplyConfiguration "json:\"livenessProbe,omitempty\"" + ReadinessProbe *applycorev1.ProbeApplyConfiguration "json:\"readinessProbe,omitempty\"" + ConfigMap *PartialCreatable "json:\"configmap,omitempty\"" + Deployment *PartialDeploymentConfig "json:\"deployment,omitempty\"" + Strategy *appsv1.DeploymentStrategy "json:\"strategy,omitempty\"" } type PartialImage struct { diff --git a/gen/partial/partial.go b/gen/partial/partial.go index adcf29faa..c24b87049 100644 --- a/gen/partial/partial.go +++ b/gen/partial/partial.go @@ -45,6 +45,18 @@ const ( mode = packages.NeedTypes | packages.NeedName | packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedImports ) +type partialImport struct { + Name string + Path string +} + +var packagePartials = map[string]partialImport{ + "k8s.io/api/core/v1": { + Name: "applycorev1", + Path: "k8s.io/client-go/applyconfigurations/core/v1", + }, +} + func Cmd() *cobra.Command { var outFlag string var headerFlag string @@ -127,7 +139,7 @@ func (g *Generator) Generate(t types.Type) []ast.Node { // reference needs to be a pointer or changed to a newly generated // type. Partialization of (anonymous) structs, is generation of a new // struct type. - partialized := g.partialize(named.Underlying()) + partialized := g.partialize(named.Underlying(), nil) var params *ast.FieldList if named.TypeParams().Len() > 0 { @@ -185,22 +197,22 @@ func (g *Generator) typeToNode(t types.Type) ast.Node { return node } -func (g *Generator) partialize(t types.Type) types.Type { +func (g *Generator) partialize(t types.Type, tag *StructTag) types.Type { // TODO cache me. switch t := t.(type) { case *types.Basic, *types.Interface, *types.Alias: return t case *types.Pointer: - return types.NewPointer(g.partialize(t.Elem())) + return types.NewPointer(g.partialize(t.Elem(), tag)) case *types.Map: - return types.NewMap(t.Key(), g.partialize(t.Elem())) + return types.NewMap(t.Key(), g.partialize(t.Elem(), nil)) case *types.Slice: - return types.NewSlice(g.partialize(t.Elem())) + return types.NewSlice(g.partialize(t.Elem(), tag)) case *types.Struct: return g.partializeStruct(t) case *types.Named: - return g.partializeNamed(t) + return g.partializeNamed(t, tag) case *types.TypeParam: return t // TODO this isn't super easy to fully support without a lot of additional information...... 
default: @@ -214,7 +226,7 @@ func (g *Generator) partializeStruct(t *types.Struct) *types.Struct { for i := 0; i < t.NumFields(); i++ { field := t.Field(i) - partialized := g.partialize(field.Type()) + partialized := g.partialize(field.Type(), parseTag(t.Tag(i)).Named("partial")) switch partialized.Underlying().(type) { case *types.Basic: partialized = types.NewPointer(partialized) @@ -240,7 +252,7 @@ func (g *Generator) partializeStruct(t *types.Struct) *types.Struct { return types.NewStruct(fields, tags) } -func (g *Generator) partializeNamed(t *types.Named) types.Type { +func (g *Generator) partializeNamed(t *types.Named, tag *StructTag) types.Type { // If there exists a Partial___ variant of the type, we'll use this. This // allows Partial structs to references partial structs from other packages // that contain Partialized structs and/or allows end users to provide @@ -266,6 +278,38 @@ func (g *Generator) partializeNamed(t *types.Named) types.Type { // NB: This check MUST match the check in FindAllNames. isPartialized := inPkg && !IsType[*types.Basic](t.Underlying()) if !isPartialized { + if tag != nil { + for _, value := range tag.Values { + if value == "builtin" { + path := t.Obj().Pkg().Path() + if override, ok := packagePartials[path]; ok { + var args []types.Type + for i := 0; i < t.TypeArgs().Len(); i++ { + args = append(args, g.partialize(t.TypeArgs().At(i), nil)) + } + + params := make([]*types.TypeParam, t.TypeParams().Len()) + for i := 0; i < t.TypeParams().Len(); i++ { + param := t.TypeParams().At(i) + // Might need to clone the typename here + params[i] = types.NewTypeParam(param.Obj(), param.Constraint()) + } + + named := types.NewNamed(types.NewTypeName(0, types.NewPackage(override.Path, override.Name), t.Obj().Name()+"ApplyConfiguration", t.Underlying()), t.Underlying(), nil) + if len(args) < 1 { + return named + } + named.SetTypeParams(params) + result, err := types.Instantiate(nil, named, args, true) + if err != nil { + panic(err) + } + return result + } + } + } + } + // If we haven't partialized this type, there's nothing we can do. Noop. 
return t } @@ -277,7 +321,7 @@ func (g *Generator) partializeNamed(t *types.Named) types.Type { var args []types.Type for i := 0; i < t.TypeArgs().Len(); i++ { - args = append(args, g.partialize(t.TypeArgs().At(i))) + args = append(args, g.partialize(t.TypeArgs().At(i), nil)) } params := make([]*types.TypeParam, t.TypeParams().Len()) @@ -369,6 +413,14 @@ func GeneratePartial(pkg *packages.Package, structName string, outPackage string if pkg, ok := originalImports[parent.Name]; ok { imports[parent.Name] = pkg } + + for _, pkg := range packagePartials { + if pkg.Name == parent.Name { + // NB: we don't actually use the import name below, so + // we just set it to empty here + imports[pkg.Name] = types.NewPackage(pkg.Path, "") + } + } } return true }) @@ -524,6 +576,9 @@ func EnsureOmitEmpty(tag string) string { var out strings.Builder for i, p := range parts { + if p.Name == "partial" { + continue + } if i > 0 { _, _ = out.WriteRune(' ') } @@ -547,7 +602,18 @@ func IsType[T types.Type](typ types.Type) bool { var tagRe = regexp.MustCompile(`([a-z_]+):"([^"]+)"`) -func parseTag(tag string) []StructTag { +type StructTags []StructTag + +func (t StructTags) Named(name string) *StructTag { + for i, tag := range t { + if tag.Name == name { + return &t[i] + } + } + return nil +} + +func parseTag(tag string) StructTags { matches := tagRe.FindAllStringSubmatch(tag, -1) tags := make([]StructTag, len(matches)) diff --git a/go.work.sum b/go.work.sum index 7b09c2e94..3bf233de3 100644 --- a/go.work.sum +++ b/go.work.sum @@ -23,6 +23,10 @@ buf.build/gen/go/redpandadata/common/connectrpc/go v1.16.2-20240508150812-e0d0fb buf.build/gen/go/redpandadata/common/connectrpc/go v1.18.1-20240917150400-3f349e63f44a.1 h1:EPRfGAJDTnM3J3MPGMPEs+HBezpiE/8lTWB3kdlQTGI= buf.build/gen/go/redpandadata/common/connectrpc/go v1.18.1-20240917150400-3f349e63f44a.1/go.mod h1:ZNgPT3k1W0p+EkMibCzOqoHOhNDi1ym6RH7/kGEHeKE= buf.build/gen/go/redpandadata/common/protocolbuffers/go v1.34.2-20240715174743-9c0afe867874.2/go.mod h1:wThyg02xJx4K/DA5fg0QlKts8XVPyTT86JC8hPfEzno= +buf.build/gen/go/redpandadata/core/connectrpc/go v1.19.1-20251031193904-15e1d027dabd.2 h1:jCG4Odp8EuikMWru6WwVylzjrSfJImAfrNeImtfp6rs= +buf.build/gen/go/redpandadata/core/connectrpc/go v1.19.1-20251031193904-15e1d027dabd.2/go.mod h1:YY+peV2t5WRrsN5JCawfDfdePKQVNhaO+0l/9Tsi+oY= +buf.build/gen/go/redpandadata/core/protocolbuffers/go v1.36.10-20251031193904-15e1d027dabd.1 h1:qbwdlxQSRcBFlq8Kcl532kcMuR+64TuvnhLC49FxzJE= +buf.build/gen/go/redpandadata/core/protocolbuffers/go v1.36.10-20251031193904-15e1d027dabd.1/go.mod h1:QenSPzqxZpyo9hHIpRzTetvDchelVDzimnmaggHKenc= buf.build/gen/go/redpandadata/dataplane/connectrpc/go v1.16.2-20240620104934-3415ce922cfb.1/go.mod h1:R0DNyd3sxZqaTQrcjSgGaJqHndFCf3kKHBbXgKYzKDY= buf.build/gen/go/redpandadata/dataplane/protocolbuffers/go v1.34.2-20240620104934-3415ce922cfb.2/go.mod h1:AcLjVYZHtwlZvBrjuqyjtZtHv9BbDaHD6C92lO/gJFI= buf.build/gen/go/redpandadata/dataplane/protocolbuffers/go v1.36.2-20250404200318-65f29ddd7b29.1/go.mod h1:zTNjffbkXs9K5/sbSlagide7l0hSTs+Oa1j39yENO8M= @@ -694,6 +698,8 @@ cloud.google.com/go/workflows v1.14.0/go.mod h1:kjar2tf4qQu7VoCTFX+L3yy+2dIFTWr6 cloud.google.com/go/workflows v1.14.2 h1:phBz5TOAES0YGogxZ6Q7ISSudaf618lRhE3euzBpE9U= cloud.google.com/go/workflows v1.14.2/go.mod h1:5nqKjMD+MsJs41sJhdVrETgvD5cOK3hUcAs8ygqYvXQ= connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc= +connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14= +connectrpc.com/connect 
v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= connectrpc.com/grpcreflect v1.2.0 h1:Q6og1S7HinmtbEuBvARLNwYmTbhEGRpHDhqrPNlmK+U= connectrpc.com/grpcreflect v1.2.0/go.mod h1:nwSOKmE8nU5u/CidgHtPYk1PFI3U9ignz7iDMxOYkSY= connectrpc.com/grpcreflect v1.3.0 h1:Y4V+ACf8/vOb1XOc251Qun7jMB75gCUNw6llvB9csXc= @@ -1416,6 +1422,8 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= @@ -3198,6 +3206,8 @@ google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojt google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v1 v1.3.7/go.mod h1:vs0oy7ub8knYaut5kITUTmx/WeE4xRuEeOR34yEAWEA= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= diff --git a/operator/api/redpanda/v1alpha2/conversion.go b/operator/api/redpanda/v1alpha2/conversion.go index b97011d59..7c3dcbb13 100644 --- a/operator/api/redpanda/v1alpha2/conversion.go +++ b/operator/api/redpanda/v1alpha2/conversion.go @@ -16,6 +16,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + applycorev1 "k8s.io/client-go/applyconfigurations/core/v1" "github.com/redpanda-data/redpanda-operator/charts/console/v3" "github.com/redpanda-data/redpanda-operator/pkg/ir" @@ -299,12 +300,11 @@ var ( conv_runtime_RawExtension_To_corev1_Volume = convertRuntimeRawExtension[corev1.Volume] conv_runtime_RawExtension_To_corev1_VolumeMount = convertRuntimeRawExtension[corev1.VolumeMount] - // TODO THIS IS BAD AND BROKEN (Will write 0s for unspecified fields and generate invalid options). - // ConsolePartialValues really needs to have ApplyConfigs for most k8s types. - // Upgrade gen partial to pull an overridden type from a comment or field tag? 
- conv_LivenessProbe_To_ProbeApplyConfiguration = convertViaMarshaling[*LivenessProbe, *ProbeApplyConfiguration] - conv_ProbeApplyConfiguration_To_corev1_Probe = convertViaMarshaling[ProbeApplyConfiguration, corev1.Probe] - conv_ReadinessProbe_To_ProbeApplyConfiguration = convertViaMarshaling[*ReadinessProbe, *ProbeApplyConfiguration] + // LivenessProbe/ReadinessProbe conversions (RedpandaConsole -> Console) + + conv_LivenessProbe_To_ProbeApplyConfiguration = convertViaMarshaling[*LivenessProbe, *ProbeApplyConfiguration] + conv_ReadinessProbe_To_ProbeApplyConfiguration = convertViaMarshaling[*ReadinessProbe, *ProbeApplyConfiguration] + conv_ProbeApplyConfiguration_To_ProbeApplyConfiguration = convertViaMarshaling[*ProbeApplyConfiguration, *applycorev1.ProbeApplyConfiguration] ) type deepCopier[T any] interface { diff --git a/operator/api/redpanda/v1alpha2/zz_generated.conversion.go b/operator/api/redpanda/v1alpha2/zz_generated.conversion.go index bdcb6c54b..c374aa7d7 100644 --- a/operator/api/redpanda/v1alpha2/zz_generated.conversion.go +++ b/operator/api/redpanda/v1alpha2/zz_generated.conversion.go @@ -152,16 +152,16 @@ func init() { } consolePartialRenderValues.Secret = v1alpha2SecretConfigToPConsolePartialSecretConfig((*source).Secret) consolePartialRenderValues.LicenseSecretRef = pV1SecretKeySelectorToPV1SecretKeySelector((*source).LicenseSecretRef) - pV1Probe, err := pV1alpha2ProbeApplyConfigurationToPV1Probe((*source).LivenessProbe) + pV1ProbeApplyConfiguration, err := conv_ProbeApplyConfiguration_To_ProbeApplyConfiguration((*source).LivenessProbe) if err != nil { return nil, err } - consolePartialRenderValues.LivenessProbe = pV1Probe - pV1Probe2, err := pV1alpha2ProbeApplyConfigurationToPV1Probe((*source).ReadinessProbe) + consolePartialRenderValues.LivenessProbe = pV1ProbeApplyConfiguration + pV1ProbeApplyConfiguration2, err := conv_ProbeApplyConfiguration_To_ProbeApplyConfiguration((*source).ReadinessProbe) if err != nil { return nil, err } - consolePartialRenderValues.ReadinessProbe = pV1Probe2 + consolePartialRenderValues.ReadinessProbe = pV1ProbeApplyConfiguration2 consolePartialRenderValues.Deployment = autoconv_DeploymentConfig_console_PartialDeploymentConfig((*source).Deployment) consolePartialRenderValues.Strategy = pV1DeploymentStrategyToPV1DeploymentStrategy((*source).Strategy) pConsolePartialRenderValues = &consolePartialRenderValues @@ -1108,17 +1108,6 @@ func pV1alpha2OIDCLoginSecretsToPConsolePartialOIDCLoginSecrets(source *OIDCLogi } return pConsolePartialOIDCLoginSecrets } -func pV1alpha2ProbeApplyConfigurationToPV1Probe(source *ProbeApplyConfiguration) (*v1.Probe, error) { - var pV1Probe *v1.Probe - if source != nil { - v1Probe, err := conv_ProbeApplyConfiguration_To_corev1_Probe((*source)) - if err != nil { - return nil, err - } - pV1Probe = &v1Probe - } - return pV1Probe, nil -} func pV1alpha2RedpandaAdminAPISecretsToPConsolePartialRedpandaAdminAPISecrets(source *RedpandaAdminAPISecrets) *v3.PartialRedpandaAdminAPISecrets { var pConsolePartialRedpandaAdminAPISecrets *v3.PartialRedpandaAdminAPISecrets if source != nil { From 4fcca1cb04c0ddf86b31a998b791048c34201704 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Tue, 25 Nov 2025 15:30:52 -0500 Subject: [PATCH 03/12] Add full render and acceptance test --- acceptance/features/console-upgrades.feature | 88 +++++++++++++++++++ acceptance/steps/console.go | 12 +++ acceptance/steps/helpers.go | 12 +++ acceptance/steps/register.go | 4 + .../redpanda/redpanda_controller.go | 4 + operator/internal/lifecycle/client.go | 
73 ++++++++++++++- operator/internal/lifecycle/interfaces.go | 6 ++ .../internal/lifecycle/v2_simple_resources.go | 29 +++++- pkg/kube/ctl.go | 4 + pkg/kube/syncer.go | 16 ++++ 10 files changed, 242 insertions(+), 6 deletions(-) diff --git a/acceptance/features/console-upgrades.feature b/acceptance/features/console-upgrades.feature index e69de29bb..8f766957b 100644 --- a/acceptance/features/console-upgrades.feature +++ b/acceptance/features/console-upgrades.feature @@ -0,0 +1,88 @@ +@operator:none +Feature: Upgrading the operator with Console installed + @skip:gke @skip:aks @skip:eks + Scenario: Console v2 to v3 no warnings + Given I helm install "redpanda-operator" "redpanda/operator" --version v25.1.3 with values: + """ + """ + And I apply Kubernetes manifest: + """ + --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: Redpanda + metadata: + namespace: redpanda-system + name: operator-console-upgrade + spec: + clusterSpec: + console: + nameOverride: broken + tls: + enabled: false + external: + enabled: false + statefulset: + replicas: 1 + sideCars: + image: + tag: dev + repository: localhost/redpanda-operator + """ + And cluster "operator-console-upgrade" is available + Then I can upgrade to the latest operator with the values: + """ + image: + tag: dev + repository: localhost/redpanda-operator + crds: + experimental: true + """ + And cluster "operator-console-upgrade" should be stable with 1 nodes + And the migrated console cluster "operator-console-upgrade-console" should have 0 warnings + + @skip:gke @skip:aks @skip:eks + Scenario: Console v2 to v3 with warnings + Given I helm install "redpanda-operator" "redpanda/operator" --version v25.1.3 with values: + """ + """ + And I apply Kubernetes manifest: + """ + --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: Redpanda + metadata: + namespace: redpanda-system + name: operator-console-upgrade-warnings + spec: + clusterSpec: + console: + nameOverride: broken + console: + roleBindings: + - roleName: admin + subjects: + - kind: group + provider: OIDC + name: devs + tls: + enabled: false + external: + enabled: false + statefulset: + replicas: 1 + sideCars: + image: + tag: dev + repository: localhost/redpanda-operator + """ + And cluster "operator-console-upgrade-warnings" is available + Then I can upgrade to the latest operator with the values: + """ + image: + tag: dev + repository: localhost/redpanda-operator + crds: + experimental: true + """ + And cluster "operator-console-upgrade-warnings" should be stable with 1 nodes + And the migrated console cluster "operator-console-upgrade-console" should have 1 warning diff --git a/acceptance/steps/console.go b/acceptance/steps/console.go index d5e361372..ecad98983 100644 --- a/acceptance/steps/console.go +++ b/acceptance/steps/console.go @@ -33,3 +33,15 @@ func consoleIsHealthy(ctx context.Context, t framework.TestingT, name string) { return upToDate && hasHealthyReplicas }, time.Minute, 10*time.Second) } + +func consoleHasWarnings(ctx context.Context, t framework.TestingT, name string, expected int) { + key := t.ResourceKey(name) + + t.Logf("Checking console %q has %d warning(s)", name, expected) + require.Eventually(t, func() bool { + var console redpandav1alpha2.Console + require.NoError(t, t.Get(ctx, key, &console)) + + return len(console.Spec.Warnings) == expected + }, time.Minute, 10*time.Second) +} diff --git a/acceptance/steps/helpers.go b/acceptance/steps/helpers.go index beb7fe975..74cda1259 100644 --- a/acceptance/steps/helpers.go +++ b/acceptance/steps/helpers.go @@ -657,3 
+657,15 @@ func getVersion(t framework.TestingT, version string) string { return t.Variant() } + +func sleepALongTime(ctx context.Context) error { + t := framework.T(ctx) + + t.Log("Becoming debuggable by sleeping for 20 minutes") + select { + case <-time.After(20 * time.Minute): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/acceptance/steps/register.go b/acceptance/steps/register.go index dbdda4881..5ab90bbc8 100644 --- a/acceptance/steps/register.go +++ b/acceptance/steps/register.go @@ -113,4 +113,8 @@ func init() { // Console scenario steps framework.RegisterStep(`^Console "([^"]+)" will be healthy`, consoleIsHealthy) + framework.RegisterStep(`^the migrated console cluster "([^"]+)" should have (\d+) warning(s)?$`, consoleHasWarnings) + + // Debug steps + framework.RegisterStep(`^I become debuggable$`, sleepALongTime) } diff --git a/operator/internal/controller/redpanda/redpanda_controller.go b/operator/internal/controller/redpanda/redpanda_controller.go index 86b7bd095..4c2b97748 100644 --- a/operator/internal/controller/redpanda/redpanda_controller.go +++ b/operator/internal/controller/redpanda/redpanda_controller.go @@ -251,11 +251,13 @@ func (r *RedpandaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r // if we have an error or an explicit requeue from one of our // sub reconcilers, then just early return if err != nil || result.Requeue || result.RequeueAfter > 0 { + log.FromContext(ctx).V(log.TraceLevel).Info("aborting reconciliation early", "error", err, "requeue", result.Requeue, "requeueAfter", result.RequeueAfter) return r.syncStatus(ctx, state, result, err) } } // we're at the end of reconciliation, so sync back our status + log.FromContext(ctx).V(log.TraceLevel).Info("finished normal reconciliation loop") return r.syncStatus(ctx, state, ctrl.Result{}, nil) } @@ -820,7 +822,9 @@ func (r *RedpandaReconciler) clusterConfigFor(ctx context.Context, rp *redpandav // syncStatus updates the status of the Redpanda cluster at the end of reconciliation when // no more reconciliation should occur. 
func (r *RedpandaReconciler) syncStatus(ctx context.Context, state *clusterReconciliationState, result ctrl.Result, err error) (ctrl.Result, error) { + original := state.cluster.Redpanda.Status.DeepCopy() if r.LifecycleClient.SetClusterStatus(state.cluster, state.status) { + log.FromContext(ctx).V(log.TraceLevel).Info("setting cluster status from diff", "original", original, "new", state.cluster.Redpanda.Status) syncErr := r.Client.Status().Update(ctx, state.cluster.Redpanda) err = errors.Join(syncErr, err) } diff --git a/operator/internal/lifecycle/client.go b/operator/internal/lifecycle/client.go index 37aaeba89..6ef7e1117 100644 --- a/operator/internal/lifecycle/client.go +++ b/operator/internal/lifecycle/client.go @@ -13,6 +13,7 @@ import ( "context" "fmt" "maps" + "reflect" "slices" "github.com/cockroachdb/errors" @@ -21,9 +22,11 @@ import ( corev1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -115,6 +118,14 @@ func (r *renderer[T, U]) Types() []kube.Object { } func (r *ResourceClient[T, U]) syncer(owner U) *kube.Syncer { + migratingResources := map[string]struct{}{} + if mr, ok := r.simpleResourceRenderer.(MigratingRenderer); ok { + for _, resource := range mr.MigratingResources() { + gvk := resource.GetObjectKind().GroupVersionKind() + migratingResources[gvk.String()] = struct{}{} + } + } + return &kube.Syncer{ Ctl: r.ctl, Namespace: owner.GetNamespace(), @@ -122,6 +133,12 @@ func (r *ResourceClient[T, U]) syncer(owner U) *kube.Syncer { Cluster: owner, SimpleResourceRenderer: r.simpleResourceRenderer, }, + MigratedResource: func(o kube.Object) bool { + if _, ok := migratingResources[o.GetObjectKind().GroupVersionKind().String()]; ok { + return true + } + return false + }, OwnershipLabels: r.ownershipResolver.GetOwnerLabels(owner), Preprocess: func(o kube.Object) { if o.GetLabels() == nil { @@ -181,13 +198,61 @@ type Builder interface { Watches(object client.Object, eventHandler handler.EventHandler, opts ...builder.WatchesOption) *builder.Builder } +type loggingHandler[T client.Object] struct { + handler.TypedEventHandler[T, reconcile.Request] +} + +func (h *loggingHandler[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + h.TypedEventHandler.Create(ctx, evt, wrapQueue(ctx, evt.Object, wq)) +} + +func (h *loggingHandler[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + h.TypedEventHandler.Update(ctx, evt, wrapQueue(ctx, evt.ObjectNew, wq)) +} + +func (h *loggingHandler[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + h.TypedEventHandler.Delete(ctx, evt, wrapQueue(ctx, evt.Object, wq)) +} + +func (h *loggingHandler[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + h.TypedEventHandler.Generic(ctx, evt, wrapQueue(ctx, evt.Object, wq)) +} + +type wrappedAdder struct { + obj client.Object + ctx context.Context + workqueue.TypedRateLimitingInterface[reconcile.Request] +} + +func wrapQueue(ctx context.Context, obj 
client.Object, wq workqueue.TypedRateLimitingInterface[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return &wrappedAdder{ + obj: obj, + ctx: ctx, + TypedRateLimitingInterface: wq, + } +} + +func (w *wrappedAdder) Add(item reconcile.Request) { + log.FromContext(w.ctx).V(log.TraceLevel).Info("[enqueue] adding reconciliation request", "request", item, "due-to", reflect.TypeOf(w.obj).String(), "name", client.ObjectKeyFromObject(w.obj)) + w.TypedRateLimitingInterface.Add(item) +} + +func wrapLoggingHandler[T client.Object](_ T, handler handler.TypedEventHandler[T, reconcile.Request]) handler.TypedEventHandler[T, reconcile.Request] { + return &loggingHandler[T]{TypedEventHandler: handler} +} + // WatchResources configures resource watching for the given cluster, including StatefulSets and other resources. func (r *ResourceClient[T, U]) WatchResources(builder Builder, cluster client.Object) error { // set that this is for the cluster builder.For(cluster) + owns := func(obj client.Object) { + loggingHandler := wrapLoggingHandler(obj, handler.EnqueueRequestForOwner(r.ctl.Scheme(), r.ctl.RESTMapper(), cluster, handler.OnlyControllerOwner())) + builder.Watches(obj, loggingHandler) + } + // set an Owns on node pool statefulsets - builder.Owns(&appsv1.StatefulSet{}) + owns(&appsv1.StatefulSet{}) for _, resourceType := range r.simpleResourceRenderer.WatchedResourceTypes() { gvk, err := kube.GVKFor(r.ctl.Scheme(), resourceType) @@ -211,20 +276,20 @@ func (r *ResourceClient[T, U]) WatchResources(builder Builder, cluster client.Ob if mapping == apimeta.RESTScopeNameNamespace { // we're working with a namespace scoped resource, so we can work with ownership - builder.Owns(resourceType) + owns(resourceType) continue } // since resources are cluster-scoped we need to call a Watch on them with some // custom mappings - builder.Watches(resourceType, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + builder.Watches(resourceType, wrapLoggingHandler(resourceType, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { if owner := r.ownershipResolver.OwnerForObject(o); owner != nil { return []reconcile.Request{{ NamespacedName: *owner, }} } return nil - })) + }))) } diff --git a/operator/internal/lifecycle/interfaces.go b/operator/internal/lifecycle/interfaces.go index fba091b8d..faadbb9e4 100644 --- a/operator/internal/lifecycle/interfaces.go +++ b/operator/internal/lifecycle/interfaces.go @@ -104,6 +104,12 @@ type SimpleResourceRenderer[T any, U Cluster[T]] interface { WatchedResourceTypes() []client.Object } +// MigratingRenderer allows an implementation to render resources that they +// don't actually want to watch due to them being temporary migrations. +type MigratingRenderer interface { + MigratingResources() []client.Object +} + // NodePoolRender handles returning the node pools for a given cluster. 
// These are handled separately from "simple" resources because we need // to manage their lifecycle, decommissioning broker nodes and scaling diff --git a/operator/internal/lifecycle/v2_simple_resources.go b/operator/internal/lifecycle/v2_simple_resources.go index c7fbcd46a..dc03d2606 100644 --- a/operator/internal/lifecycle/v2_simple_resources.go +++ b/operator/internal/lifecycle/v2_simple_resources.go @@ -12,6 +12,8 @@ package lifecycle import ( "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -42,8 +44,7 @@ func (m *V2SimpleResourceRenderer) Render(ctx context.Context, cluster *ClusterW spec := cluster.Spec.ClusterSpec.DeepCopy() if spec != nil { - // normalize the spec by removing the connectors and console stanzas which are deprecated - spec.Console = nil + // normalize the spec by removing the connectors stanza since it's deprecated spec.Connectors = nil } @@ -59,6 +60,9 @@ func (m *V2SimpleResourceRenderer) Render(ctx context.Context, cluster *ClusterW return nil, err } + // disable the console spec components so we don't try to render it twice + state.Values.Console.Enabled = ptr.To(false) + resources, err := redpanda.RenderResources(state) if err != nil { return nil, err @@ -91,6 +95,14 @@ func (m *V2SimpleResourceRenderer) consoleIntegration( } return &redpandav1alpha2.Console{ + TypeMeta: metav1.TypeMeta{ + Kind: "Console", + APIVersion: redpandav1alpha2.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name, + Namespace: cluster.Namespace, + }, Spec: redpandav1alpha2.ConsoleSpec{ ConsoleValues: *values, ClusterSource: &redpandav1alpha2.ClusterSource{ @@ -107,3 +119,16 @@ func (m *V2SimpleResourceRenderer) consoleIntegration( func (m *V2SimpleResourceRenderer) WatchedResourceTypes() []client.Object { return redpanda.Types() } + +// MigratingResources returns a list of resources that need to be migrated +// away from being managed by the Redpanda CRD. +func (m *V2SimpleResourceRenderer) MigratingResources() []client.Object { + return []client.Object{ + &redpandav1alpha2.Console{ + TypeMeta: metav1.TypeMeta{ + Kind: "Console", + APIVersion: redpandav1alpha2.GroupVersion.String(), + }, + }, + } +} diff --git a/pkg/kube/ctl.go b/pkg/kube/ctl.go index f4a2c9882..258bbeb0a 100644 --- a/pkg/kube/ctl.go +++ b/pkg/kube/ctl.go @@ -148,6 +148,10 @@ func (c *Ctl) ScopeOf(gvk schema.GroupVersionKind) (meta.RESTScopeName, error) { return mapping.Scope.Name(), nil } +func (c *Ctl) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + // Get fetches the latest state of an object into `obj` from Kubernetes. // Usage: // diff --git a/pkg/kube/syncer.go b/pkg/kube/syncer.go index 4891a8ec5..ea00eb0bb 100644 --- a/pkg/kube/syncer.go +++ b/pkg/kube/syncer.go @@ -55,6 +55,8 @@ type Syncer struct { // OwnershipLabels CAN NOT be changed without abandoning objects. OwnershipLabels map[string]string + MigratedResource func(Object) bool + // Preprocess, if provided, is run ahead of applying Objects. It may be // used to add additional labels, annotation, etc uniformly. 
Preprocess func(Object) @@ -150,11 +152,19 @@ func (s *Syncer) DeleteAll(ctx context.Context) (bool, error) { return alive > 0, nil } +func (s *Syncer) skipItem(o Object) bool { + return s.MigratedResource != nil && s.MigratedResource(o) +} + func (s *Syncer) listInPurview(ctx context.Context) ([]Object, error) { logger := log.FromContext(ctx) var objects []Object for _, t := range s.Renderer.Types() { + if s.skipItem(t) { + continue + } + gvk, err := GVKFor(s.Ctl.Scheme(), t) if err != nil { return nil, err @@ -226,6 +236,12 @@ func (s *Syncer) toSync(ctx context.Context) ([]Object, error) { } for _, obj := range objs { + if s.skipItem(obj) { + // we only pre-process items that are not intentionally skipped + // for the purpose of migration + continue + } + // Ensure that all types returned are present in s.Types. If they aren't // we'd potentially "leak" objects. if _, ok := expectedTypes[reflect.TypeOf(obj)]; !ok { From e40a2bfa9b8631a441d663c188238d44c94dac67 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Tue, 25 Nov 2025 16:03:31 -0500 Subject: [PATCH 04/12] Fix up unit tests --- .../console-migration-cases.golden.txtar | 2 + operator/internal/lifecycle/client.go | 19 +- operator/internal/lifecycle/client_test.go | 2 + .../testdata/cases.pools.golden.txtar | 7 +- .../testdata/cases.resources.golden.txtar | 438 +----------------- .../testdata/cases.values.golden.txtar | 4 +- 6 files changed, 33 insertions(+), 439 deletions(-) diff --git a/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar b/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar index 03b8b3a5c..45f694454 100644 --- a/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar +++ b/operator/api/redpanda/v1alpha2/testdata/console-migration-cases.golden.txtar @@ -29,8 +29,10 @@ secret: {} -- 03-config-and-console -- config: authentication: + jwtSigningKey: secret123 someOtherSetting: - absolutely + useSecureCookies: true kafka: sasl: enabled: true diff --git a/operator/internal/lifecycle/client.go b/operator/internal/lifecycle/client.go index 6ef7e1117..5a8c11269 100644 --- a/operator/internal/lifecycle/client.go +++ b/operator/internal/lifecycle/client.go @@ -71,6 +71,7 @@ func NewResourceClient[T any, U Cluster[T]](mgr ctrl.Manager, resourcesFn Resour statusUpdater: statusUpdater, nodePoolRenderer: nodePoolRenderer, simpleResourceRenderer: simpleResourceRenderer, + traceLogging: true, } } @@ -79,6 +80,7 @@ func NewResourceClient[T any, U Cluster[T]](mgr ctrl.Manager, resourcesFn Resour type ResourceClient[T any, U Cluster[T]] struct { ctl *kube.Ctl logger logr.Logger + traceLogging bool ownershipResolver OwnershipResolver[T, U] statusUpdater ClusterStatusUpdater[T, U] nodePoolRenderer NodePoolRenderer[T, U] @@ -247,8 +249,12 @@ func (r *ResourceClient[T, U]) WatchResources(builder Builder, cluster client.Ob builder.For(cluster) owns := func(obj client.Object) { - loggingHandler := wrapLoggingHandler(obj, handler.EnqueueRequestForOwner(r.ctl.Scheme(), r.ctl.RESTMapper(), cluster, handler.OnlyControllerOwner())) - builder.Watches(obj, loggingHandler) + if r.traceLogging { + loggingHandler := wrapLoggingHandler(obj, handler.EnqueueRequestForOwner(r.ctl.Scheme(), r.ctl.RESTMapper(), cluster, handler.OnlyControllerOwner())) + builder.Watches(obj, loggingHandler) + } else { + builder.Owns(obj) + } } // set an Owns on node pool statefulsets @@ -282,15 +288,18 @@ func (r *ResourceClient[T, U]) WatchResources(builder Builder, cluster client.Ob // since 
resources are cluster-scoped we need to call a Watch on them with some // custom mappings - builder.Watches(resourceType, wrapLoggingHandler(resourceType, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + watchHandler := handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { if owner := r.ownershipResolver.OwnerForObject(o); owner != nil { return []reconcile.Request{{ NamespacedName: *owner, }} } return nil - }))) - + }) + if r.traceLogging { + watchHandler = wrapLoggingHandler(resourceType, watchHandler) + } + builder.Watches(resourceType, watchHandler) } return nil diff --git a/operator/internal/lifecycle/client_test.go b/operator/internal/lifecycle/client_test.go index 81b52a4aa..0b1e9de38 100644 --- a/operator/internal/lifecycle/client_test.go +++ b/operator/internal/lifecycle/client_test.go @@ -140,6 +140,7 @@ func (tt *clientTest) setupClient(ctx context.Context, t *testing.T) (*clientTes resolver, updater, nodeRenderer, resourceRenderer, factory := MockResourceManagersSetup() resourceClient := NewResourceClient(manager, factory) + resourceClient.traceLogging = false return &clientTestInstances{ resolver: resolver, @@ -386,6 +387,7 @@ func TestClientWatchResources(t *testing.T) { require.NoError(t, instances.resourceClient.WatchResources(builder, &MockCluster{})) require.Equal(t, "*lifecycle.MockCluster", builder.Base()) + require.ElementsMatch(t, tt.ownedResources, builder.Owned()) require.ElementsMatch(t, tt.watchedResources, builder.Watched()) }) diff --git a/operator/internal/lifecycle/testdata/cases.pools.golden.txtar b/operator/internal/lifecycle/testdata/cases.pools.golden.txtar index 315c45e32..f516108a2 100644 --- a/operator/internal/lifecycle/testdata/cases.pools.golden.txtar +++ b/operator/internal/lifecycle/testdata/cases.pools.golden.txtar @@ -1219,7 +1219,7 @@ valueFrom: fieldRef: fieldPath: status.hostIP - image: redpandadata/redpanda:v25.2.1 + image: redpandadata/redpanda-unstable:v25.3.1-rc2 lifecycle: postStart: exec: @@ -1309,6 +1309,7 @@ - console-disabled - --redpanda-cluster-name - console-disabled + - --selector=helm.sh/chart=redpanda-25.1.1-beta3,app.kubernetes.io/name=redpanda,app.kubernetes.io/instance=console-disabled - --run-broker-probe - --broker-probe-broker-url - $(SERVICE_NAME).console-disabled.console-disabled.svc.cluster.local.:9644 @@ -1355,7 +1356,7 @@ - /bin/bash - -c - rpk redpanda tune all - image: redpandadata/redpanda:v25.2.1 + image: redpandadata/redpanda-unstable:v25.3.1-rc2 name: tuning resources: {} securityContext: @@ -1393,7 +1394,7 @@ fieldRef: apiVersion: v1 fieldPath: status.hostIP - image: redpandadata/redpanda:v25.2.1 + image: redpandadata/redpanda-unstable:v25.3.1-rc2 name: redpanda-configurator resources: {} volumeMounts: diff --git a/operator/internal/lifecycle/testdata/cases.resources.golden.txtar b/operator/internal/lifecycle/testdata/cases.resources.golden.txtar index bb6ce28f3..0395b9197 100644 --- a/operator/internal/lifecycle/testdata/cases.resources.golden.txtar +++ b/operator/internal/lifecycle/testdata/cases.resources.golden.txtar @@ -860,148 +860,18 @@ rpk redpanda config --config "$CONFIG" set pandaproxy.advertised_pandaproxy_api[1] "${ADVERTISED_HTTP_ADDRESSES[$POD_ORDINAL]}" type: Opaque -- apiVersion: v1 - data: - config.yaml: | - # from .Values.config - kafka: - brokers: - - basic-test-0.basic-test.basic-test.svc.cluster.local.:9093 - - basic-test-1.basic-test.basic-test.svc.cluster.local.:9093 - - 
basic-test-2.basic-test.basic-test.svc.cluster.local.:9093 - tls: - caFilepath: /etc/tls/certs/secrets/basic-test-default-cert/ca.crt - enabled: true - redpanda: - adminApi: - enabled: true - tls: - caFilepath: /etc/tls/certs/secrets/basic-test-default-cert/ca.crt - enabled: true - urls: - - https://basic-test.basic-test.svc.cluster.local.:9644 - schemaRegistry: - enabled: true - tls: - caFilepath: /etc/tls/certs/secrets/basic-test-default-cert/ca.crt - enabled: true - urls: - - https://basic-test-0.basic-test.basic-test.svc.cluster.local.:8081 - - https://basic-test-1.basic-test.basic-test.svc.cluster.local.:8081 - - https://basic-test-2.basic-test.basic-test.svc.cluster.local.:8081 - kind: ConfigMap +- apiVersion: cluster.redpanda.com/v1alpha2 + kind: Console metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: basic-test - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: console - app.kubernetes.io/version: v3.2.2 cluster.redpanda.com/namespace: basic-test cluster.redpanda.com/operator: v2 cluster.redpanda.com/owner: basic-test - helm.sh/chart: console-3.2.0 helm.toolkit.fluxcd.io/name: basic-test helm.toolkit.fluxcd.io/namespace: basic-test - name: basic-test-console - namespace: basic-test -- apiVersion: apps/v1 - kind: Deployment - metadata: - creationTimestamp: null - labels: - app.kubernetes.io/instance: basic-test - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: console - app.kubernetes.io/version: v3.2.2 - cluster.redpanda.com/namespace: basic-test - cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: basic-test - helm.sh/chart: console-3.2.0 - helm.toolkit.fluxcd.io/name: basic-test - helm.toolkit.fluxcd.io/namespace: basic-test - name: basic-test-console + name: basic-test namespace: basic-test - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: basic-test - app.kubernetes.io/name: console - strategy: {} - template: - metadata: - annotations: - checksum/config: a3d974367947aee56c3c1d7882e25166b4838fe7da7d082b35e96060e3cc8533 - creationTimestamp: null - labels: - app.kubernetes.io/instance: basic-test - app.kubernetes.io/name: console - spec: - affinity: {} - automountServiceAccountToken: false - containers: - - args: - - --config.filepath=/etc/console/configs/config.yaml - image: docker.redpanda.com/redpandadata/console:v3.2.2 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 3 - httpGet: - path: /admin/health - port: http - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - name: console - ports: - - containerPort: 8080 - name: http - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /admin/health - port: http - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: {} - securityContext: - runAsNonRoot: true - volumeMounts: - - mountPath: /etc/console/configs - name: configs - readOnly: true - - mountPath: /etc/tls/certs - name: redpanda-certificates - securityContext: - fsGroup: 99 - fsGroupChangePolicy: Always - runAsUser: 99 - serviceAccountName: basic-test-console - volumes: - - configMap: - name: basic-test-console - name: configs - - name: redpanda-certificates - projected: - sources: - - secret: - items: - - key: ca.crt - path: secrets/basic-test-default-cert/ca.crt - name: basic-test-default-cert - status: {} -- metadata: - creationTimestamp: null - labels: - cluster.redpanda.com/namespace: basic-test - cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: basic-test - 
helm.toolkit.fluxcd.io/name: basic-test - helm.toolkit.fluxcd.io/namespace: basic-test spec: cluster: clusterRef: @@ -2297,160 +2167,18 @@ echo "passed" type: Opaque -- apiVersion: v1 - data: - config.yaml: | - # from .Values.config - kafka: - brokers: - - compat-test-pool-0.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-1.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-2.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-3.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-4.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-5.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-6.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-7.compat-test.compat-test.svc.cluster.local.:9093 - - compat-test-pool-8.compat-test.compat-test.svc.cluster.local.:9093 - tls: - caFilepath: /etc/tls/certs/secrets/compat-test-default-cert/ca.crt - enabled: true - redpanda: - adminApi: - enabled: true - tls: - caFilepath: /etc/tls/certs/secrets/compat-test-default-cert/ca.crt - enabled: true - urls: - - https://compat-test.compat-test.svc.cluster.local.:9644 - schemaRegistry: - enabled: true - tls: - caFilepath: /etc/tls/certs/secrets/compat-test-default-cert/ca.crt - enabled: true - urls: - - https://compat-test-pool-0.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-1.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-2.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-3.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-4.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-5.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-6.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-7.compat-test.compat-test.svc.cluster.local.:8081 - - https://compat-test-pool-8.compat-test.compat-test.svc.cluster.local.:8081 - kind: ConfigMap +- apiVersion: cluster.redpanda.com/v1alpha2 + kind: Console metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: compat-test - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: console - app.kubernetes.io/version: v3.2.2 cluster.redpanda.com/namespace: compat-test cluster.redpanda.com/operator: v2 cluster.redpanda.com/owner: compat-test - helm.sh/chart: console-3.2.0 helm.toolkit.fluxcd.io/name: compat-test helm.toolkit.fluxcd.io/namespace: compat-test - name: compat-test-console - namespace: compat-test -- apiVersion: apps/v1 - kind: Deployment - metadata: - creationTimestamp: null - labels: - app.kubernetes.io/instance: compat-test - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: console - app.kubernetes.io/version: v3.2.2 - cluster.redpanda.com/namespace: compat-test - cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: compat-test - helm.sh/chart: console-3.2.0 - helm.toolkit.fluxcd.io/name: compat-test - helm.toolkit.fluxcd.io/namespace: compat-test - name: compat-test-console + name: compat-test namespace: compat-test - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: compat-test - app.kubernetes.io/name: console - strategy: {} - template: - metadata: - annotations: - checksum/config: b5ebe68cfb837a39b35d01298571830506efbee4eb27a57d95b68da52a2af45f - creationTimestamp: null - labels: - app.kubernetes.io/instance: compat-test - app.kubernetes.io/name: console - spec: - affinity: {} - 
automountServiceAccountToken: false - containers: - - args: - - --config.filepath=/etc/console/configs/config.yaml - image: docker.redpanda.com/redpandadata/console:v3.2.2 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 3 - httpGet: - path: /admin/health - port: http - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - name: console - ports: - - containerPort: 8080 - name: http - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /admin/health - port: http - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: {} - securityContext: - runAsNonRoot: true - volumeMounts: - - mountPath: /etc/console/configs - name: configs - readOnly: true - - mountPath: /etc/tls/certs - name: redpanda-certificates - securityContext: - fsGroup: 99 - fsGroupChangePolicy: Always - runAsUser: 99 - serviceAccountName: compat-test-console - volumes: - - configMap: - name: compat-test-console - name: configs - - name: redpanda-certificates - projected: - sources: - - secret: - items: - - key: ca.crt - path: secrets/compat-test-default-cert/ca.crt - name: compat-test-default-cert - status: {} -- metadata: - creationTimestamp: null - labels: - cluster.redpanda.com/namespace: compat-test - cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: compat-test - helm.toolkit.fluxcd.io/name: compat-test - helm.toolkit.fluxcd.io/namespace: compat-test spec: cluster: clusterRef: @@ -3319,20 +3047,6 @@ rpk redpanda config --config "$CONFIG" set pandaproxy.advertised_pandaproxy_api[1] "${ADVERTISED_HTTP_ADDRESSES[$POD_ORDINAL]}" type: Opaque -- metadata: - creationTimestamp: null - labels: - cluster.redpanda.com/namespace: console-disabled - cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: console-disabled - helm.toolkit.fluxcd.io/name: console-disabled - helm.toolkit.fluxcd.io/namespace: console-disabled - spec: - cluster: - clusterRef: - name: console-disabled - secret: {} - status: {} -- nodepool-basic-test -- - apiVersion: v1 kind: Service @@ -4715,152 +4429,18 @@ rpk redpanda config --config "$CONFIG" set pandaproxy.advertised_pandaproxy_api[1] "${ADVERTISED_HTTP_ADDRESSES[$POD_ORDINAL]}" type: Opaque -- apiVersion: v1 - data: - config.yaml: | - # from .Values.config - kafka: - brokers: - - nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - - nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9093 - tls: - caFilepath: /etc/tls/certs/secrets/nodepool-basic-test-default-cert/ca.crt - enabled: true - redpanda: - adminApi: - enabled: true - tls: - caFilepath: /etc/tls/certs/secrets/nodepool-basic-test-default-cert/ca.crt - enabled: true - urls: - - https://nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:9644 - schemaRegistry: - enabled: true - tls: - caFilepath: /etc/tls/certs/secrets/nodepool-basic-test-default-cert/ca.crt - enabled: true - urls: - - https://nodepool-basic-test-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - https://nodepool-basic-test-1.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - https://nodepool-basic-test-2.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - 
https://nodepool-basic-test-basic-a-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - - https://nodepool-basic-test-basic-b-0.nodepool-basic-test.nodepool-basic-test.svc.cluster.local.:8081 - kind: ConfigMap - metadata: - creationTimestamp: null - labels: - app.kubernetes.io/instance: nodepool-basic-test - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: console - app.kubernetes.io/version: v3.2.2 - cluster.redpanda.com/namespace: nodepool-basic-test - cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: nodepool-basic-test - helm.sh/chart: console-3.2.0 - helm.toolkit.fluxcd.io/name: nodepool-basic-test - helm.toolkit.fluxcd.io/namespace: nodepool-basic-test - name: nodepool-basic-test-console - namespace: nodepool-basic-test -- apiVersion: apps/v1 - kind: Deployment +- apiVersion: cluster.redpanda.com/v1alpha2 + kind: Console metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: nodepool-basic-test - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: console - app.kubernetes.io/version: v3.2.2 cluster.redpanda.com/namespace: nodepool-basic-test cluster.redpanda.com/operator: v2 cluster.redpanda.com/owner: nodepool-basic-test - helm.sh/chart: console-3.2.0 helm.toolkit.fluxcd.io/name: nodepool-basic-test helm.toolkit.fluxcd.io/namespace: nodepool-basic-test - name: nodepool-basic-test-console + name: nodepool-basic-test namespace: nodepool-basic-test - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nodepool-basic-test - app.kubernetes.io/name: console - strategy: {} - template: - metadata: - annotations: - checksum/config: c85e1818140a237fff8a40f9f195dade651942089e551fb9a093fb5e82bd14ec - creationTimestamp: null - labels: - app.kubernetes.io/instance: nodepool-basic-test - app.kubernetes.io/name: console - spec: - affinity: {} - automountServiceAccountToken: false - containers: - - args: - - --config.filepath=/etc/console/configs/config.yaml - image: docker.redpanda.com/redpandadata/console:v3.2.2 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 3 - httpGet: - path: /admin/health - port: http - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - name: console - ports: - - containerPort: 8080 - name: http - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /admin/health - port: http - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: {} - securityContext: - runAsNonRoot: true - volumeMounts: - - mountPath: /etc/console/configs - name: configs - readOnly: true - - mountPath: /etc/tls/certs - name: redpanda-certificates - securityContext: - fsGroup: 99 - fsGroupChangePolicy: Always - runAsUser: 99 - serviceAccountName: nodepool-basic-test-console - volumes: - - configMap: - name: nodepool-basic-test-console - name: configs - - name: redpanda-certificates - projected: - sources: - - secret: - items: - - key: ca.crt - path: secrets/nodepool-basic-test-default-cert/ca.crt - name: nodepool-basic-test-default-cert - status: {} -- metadata: - creationTimestamp: null - labels: - cluster.redpanda.com/namespace: nodepool-basic-test - cluster.redpanda.com/operator: v2 - cluster.redpanda.com/owner: nodepool-basic-test - helm.toolkit.fluxcd.io/name: nodepool-basic-test - helm.toolkit.fluxcd.io/namespace: nodepool-basic-test spec: cluster: clusterRef: diff --git a/operator/internal/lifecycle/testdata/cases.values.golden.txtar b/operator/internal/lifecycle/testdata/cases.values.golden.txtar index 
e40213373..57259bafa 100644 --- a/operator/internal/lifecycle/testdata/cases.values.golden.txtar +++ b/operator/internal/lifecycle/testdata/cases.values.golden.txtar @@ -1103,8 +1103,8 @@ values: force: false fullnameOverride: "" image: - repository: redpandadata/redpanda - tag: v25.2.1 + repository: redpandadata/redpanda-unstable + tag: v25.3.1-rc2 license_key: "" listeners: admin: From b3f400bf2199a5f71c2effbf3ab50669cf1953c7 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Tue, 25 Nov 2025 16:58:23 -0500 Subject: [PATCH 05/12] remove stray namespace --- acceptance/features/console-upgrades.feature | 2 -- 1 file changed, 2 deletions(-) diff --git a/acceptance/features/console-upgrades.feature b/acceptance/features/console-upgrades.feature index 8f766957b..97cf72a05 100644 --- a/acceptance/features/console-upgrades.feature +++ b/acceptance/features/console-upgrades.feature @@ -11,7 +11,6 @@ Feature: Upgrading the operator with Console installed apiVersion: cluster.redpanda.com/v1alpha2 kind: Redpanda metadata: - namespace: redpanda-system name: operator-console-upgrade spec: clusterSpec: @@ -51,7 +50,6 @@ Feature: Upgrading the operator with Console installed apiVersion: cluster.redpanda.com/v1alpha2 kind: Redpanda metadata: - namespace: redpanda-system name: operator-console-upgrade-warnings spec: clusterSpec: From 7be7c4fe4a97d47758d607fdf1b332214fa7748f Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Tue, 25 Nov 2025 19:58:51 -0500 Subject: [PATCH 06/12] Fix acceptance tests and enable Console reconciliation by default --- acceptance/features/console-upgrades.feature | 4 +- .../files/rbac/v2-manager.ClusterRole.yaml | 1 + .../testdata/template-cases.golden.txtar | 47 +++++++++++++++++++ operator/cmd/run/run.go | 2 +- operator/config/rbac/itemized/v2-manager.yaml | 1 + .../redpanda/redpanda_controller.go | 1 + .../controller/redpanda/testdata/role.yaml | 1 + 7 files changed, 54 insertions(+), 3 deletions(-) diff --git a/acceptance/features/console-upgrades.feature b/acceptance/features/console-upgrades.feature index 97cf72a05..baef48d55 100644 --- a/acceptance/features/console-upgrades.feature +++ b/acceptance/features/console-upgrades.feature @@ -28,7 +28,7 @@ Feature: Upgrading the operator with Console installed repository: localhost/redpanda-operator """ And cluster "operator-console-upgrade" is available - Then I can upgrade to the latest operator with the values: + Then I can helm upgrade "redpanda-operator" "../operator/chart" with values: """ image: tag: dev @@ -74,7 +74,7 @@ Feature: Upgrading the operator with Console installed repository: localhost/redpanda-operator """ And cluster "operator-console-upgrade-warnings" is available - Then I can upgrade to the latest operator with the values: + Then I can helm upgrade "redpanda-operator" "../operator/chart" with values: """ image: tag: dev diff --git a/operator/chart/files/rbac/v2-manager.ClusterRole.yaml b/operator/chart/files/rbac/v2-manager.ClusterRole.yaml index ef9af4234..8303bf26f 100644 --- a/operator/chart/files/rbac/v2-manager.ClusterRole.yaml +++ b/operator/chart/files/rbac/v2-manager.ClusterRole.yaml @@ -88,6 +88,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: diff --git a/operator/chart/testdata/template-cases.golden.txtar b/operator/chart/testdata/template-cases.golden.txtar index 84c97835b..bb4c9cfa6 100644 --- a/operator/chart/testdata/template-cases.golden.txtar +++ b/operator/chart/testdata/template-cases.golden.txtar @@ -329,6 +329,7 @@ rules: - 
apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -1102,6 +1103,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -1902,6 +1904,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -2846,6 +2849,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -3780,6 +3784,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -4567,6 +4572,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -5488,6 +5494,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -6477,6 +6484,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -7275,6 +7283,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -8067,6 +8076,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -8898,6 +8908,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -9526,6 +9537,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -10493,6 +10505,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -11332,6 +11345,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -11994,6 +12008,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -12793,6 +12808,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -13853,6 +13869,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -14860,6 +14877,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -15851,6 +15869,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -16909,6 +16928,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -17986,6 +18006,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -19023,6 +19044,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -19989,6 +20011,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -21201,6 +21224,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -22515,6 +22539,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -23541,6 +23566,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -24702,6 +24728,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -26171,6 +26198,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -27475,6 +27503,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -28431,6 +28460,7 @@ rules: - apiGroups: - cluster.redpanda.com 
resources: + - consoles - nodepools - redpandas verbs: @@ -31200,6 +31230,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -33225,6 +33256,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -35816,6 +35848,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -37262,6 +37295,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -39306,6 +39340,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -41137,6 +41172,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -42435,6 +42471,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -43855,6 +43892,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -45183,6 +45221,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -95507,6 +95546,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -96419,6 +96459,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -97330,6 +97371,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -98102,6 +98144,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -98891,6 +98934,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -99663,6 +99707,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -100435,6 +100480,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: @@ -101343,6 +101389,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: diff --git a/operator/cmd/run/run.go b/operator/cmd/run/run.go index 282207efb..4d389ba56 100644 --- a/operator/cmd/run/run.go +++ b/operator/cmd/run/run.go @@ -139,7 +139,7 @@ func (o *RunOptions) BindFlags(cmd *cobra.Command) { cmd.Flags().BoolVar(&o.webhookEnabled, "webhook-enabled", false, "Enable webhook Manager") // Controller flags. 
- cmd.Flags().BoolVar(&o.enableConsoleController, "enable-console", false, "Specifies whether or not to enabled the redpanda Console controller") + cmd.Flags().BoolVar(&o.enableConsoleController, "enable-console", true, "Specifies whether or not to enabled the redpanda Console controller") cmd.Flags().BoolVar(&o.enableV2NodepoolController, "enable-v2-nodepools", false, "Specifies whether or not to enabled the v2 nodepool controller") cmd.Flags().BoolVar(&o.enableShadowLinksController, "enable-shadowlinks", false, "Specifies whether or not to enabled the shadow links controller") cmd.Flags().BoolVar(&o.enableVectorizedControllers, "enable-vectorized-controllers", false, "Specifies whether or not to enabled the legacy controllers for resources in the Vectorized Group (Also known as V1 operator mode)") diff --git a/operator/config/rbac/itemized/v2-manager.yaml b/operator/config/rbac/itemized/v2-manager.yaml index 740a1fd1c..16439560e 100644 --- a/operator/config/rbac/itemized/v2-manager.yaml +++ b/operator/config/rbac/itemized/v2-manager.yaml @@ -88,6 +88,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: diff --git a/operator/internal/controller/redpanda/redpanda_controller.go b/operator/internal/controller/redpanda/redpanda_controller.go index 4c2b97748..4da90e30d 100644 --- a/operator/internal/controller/redpanda/redpanda_controller.go +++ b/operator/internal/controller/redpanda/redpanda_controller.go @@ -100,6 +100,7 @@ type RedpandaReconciler struct { // Console chart // +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.redpanda.com,resources=consoles,verbs=get;list;watch;create;update;patch;delete // redpanda resources // +kubebuilder:rbac:groups=cluster.redpanda.com,resources=redpandas,verbs=get;list;watch;create;update;patch;delete diff --git a/operator/internal/controller/redpanda/testdata/role.yaml b/operator/internal/controller/redpanda/testdata/role.yaml index f14e181b3..05c475fd6 100644 --- a/operator/internal/controller/redpanda/testdata/role.yaml +++ b/operator/internal/controller/redpanda/testdata/role.yaml @@ -127,6 +127,7 @@ rules: - apiGroups: - cluster.redpanda.com resources: + - consoles - nodepools - redpandas verbs: From 1e4950a0f2317493a599cd21e22a7ad4c04ac422 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Wed, 26 Nov 2025 09:48:17 -0500 Subject: [PATCH 07/12] fix reference names --- acceptance/features/console-upgrades.feature | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/acceptance/features/console-upgrades.feature b/acceptance/features/console-upgrades.feature index baef48d55..1c7b3b719 100644 --- a/acceptance/features/console-upgrades.feature +++ b/acceptance/features/console-upgrades.feature @@ -37,7 +37,7 @@ Feature: Upgrading the operator with Console installed experimental: true """ And cluster "operator-console-upgrade" should be stable with 1 nodes - And the migrated console cluster "operator-console-upgrade-console" should have 0 warnings + And the migrated console cluster "operator-console-upgrade" should have 0 warnings @skip:gke @skip:aks @skip:eks Scenario: Console v2 to v3 with warnings @@ -83,4 +83,4 @@ Feature: Upgrading the operator with Console installed experimental: true """ And cluster "operator-console-upgrade-warnings" should be stable with 1 nodes 
- And the migrated console cluster "operator-console-upgrade-console" should have 1 warning + And the migrated console cluster "operator-console-upgrade-warnings" should have 1 warning From b9d31c8c58333aca7ad41ef49a982816d695fc00 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Wed, 26 Nov 2025 10:23:13 -0500 Subject: [PATCH 08/12] swap from console GC to external service --- .../redpanda/redpanda_controller_test.go | 48 ++++++++++++++----- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/operator/internal/controller/redpanda/redpanda_controller_test.go b/operator/internal/controller/redpanda/redpanda_controller_test.go index 4063e06c0..e2d27d554 100644 --- a/operator/internal/controller/redpanda/redpanda_controller_test.go +++ b/operator/internal/controller/redpanda/redpanda_controller_test.go @@ -107,7 +107,19 @@ func (s *RedpandaControllerSuite) TestManaged() { func (s *RedpandaControllerSuite) TestObjectsGCed() { rp := s.minimalRP() - rp.Spec.ClusterSpec.Console.Enabled = ptr.To(true) + + // NB: this test originally tested GC behavior through Console deployments + // now that Console stanzas are migrated to their own CRD which we intentionally + // orphan, we need to test this with some other resource that gets GC'd, here + // namely an external service. + rp.Spec.ClusterSpec.External.Enabled = ptr.To(true) + rp.Spec.ClusterSpec.External.Service = &redpandav1alpha2.ExternalService{ + Enabled: ptr.To(true), + } + rp.Spec.ClusterSpec.External.Type = ptr.To(string(corev1.ServiceTypeLoadBalancer)) + for range *rp.Spec.ClusterSpec.Statefulset.Replicas { + rp.Spec.ClusterSpec.External.Addresses = append(rp.Spec.ClusterSpec.External.Addresses, "127.0.0.1:1234") + } s.applyAndWait(rp) @@ -151,22 +163,34 @@ func (s *RedpandaControllerSuite) TestObjectsGCed() { s.Require().NoError(s.client.Create(s.ctx, secret)) } - // Assert that the console deployment exists + // Assert that the external service exists s.EventuallyWithT(func(t *assert.CollectT) { - var deployments appsv1.DeploymentList - assert.NoError(t, s.client.List(s.ctx, &deployments, client.MatchingLabels{"app.kubernetes.io/instance": rp.Name, "app.kubernetes.io/name": "console"})) - assert.Len(t, deployments.Items, 1) - }, time.Minute, time.Second, "console deployment not scheduled") + var services corev1.ServiceList + assert.NoError(t, s.client.List(s.ctx, &services, client.MatchingLabels{"app.kubernetes.io/instance": rp.Name, "app.kubernetes.io/name": "redpanda"})) + found := false + for _, service := range services.Items { + if service.Spec.Type == corev1.ServiceTypeLoadBalancer { + found = true + } + } + assert.True(t, found) + }, time.Minute, time.Second, "external service not found") - rp.Spec.ClusterSpec.Console.Enabled = ptr.To(false) + rp.Spec.ClusterSpec.External.Enabled = ptr.To(false) s.applyAndWait(rp) - // Assert that the console deployment has been garbage collected. + // Assert that the external service has been garbage collected. 
s.EventuallyWithT(func(t *assert.CollectT) { - var deployments appsv1.DeploymentList - assert.NoError(t, s.client.List(s.ctx, &deployments, client.MatchingLabels{"app.kubernetes.io/instance": rp.Name, "app.kubernetes.io/name": "console"})) - assert.Len(t, deployments.Items, 0) - }, time.Minute, time.Second, "console deployment not GC'd") + var services corev1.ServiceList + assert.NoError(t, s.client.List(s.ctx, &services, client.MatchingLabels{"app.kubernetes.io/instance": rp.Name, "app.kubernetes.io/name": "redpanda"})) + found := false + for _, service := range services.Items { + if service.Spec.Type == corev1.ServiceTypeLoadBalancer { + found = true + } + } + assert.False(t, found) + }, time.Minute, time.Second, "external service not GC'd") // Assert that our previously created secrets have not been GC'd. for _, secret := range secrets { From 7411da3cc2c1564eb23e8602b42f7cfa7593d7e4 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Wed, 26 Nov 2025 10:58:02 -0500 Subject: [PATCH 09/12] retry on missing console --- acceptance/steps/console.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/acceptance/steps/console.go b/acceptance/steps/console.go index ecad98983..e27daf32c 100644 --- a/acceptance/steps/console.go +++ b/acceptance/steps/console.go @@ -40,7 +40,11 @@ func consoleHasWarnings(ctx context.Context, t framework.TestingT, name string, t.Logf("Checking console %q has %d warning(s)", name, expected) require.Eventually(t, func() bool { var console redpandav1alpha2.Console - require.NoError(t, t.Get(ctx, key, &console)) + if t.Get(ctx, key, &console) != nil { + // we have an error fetching, maybe have not yet reconciled, + // so just try again + return false + } return len(console.Spec.Warnings) == expected }, time.Minute, 10*time.Second) From 2947e8a3cf36aa5785a47a1f481da95839a00894 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Wed, 26 Nov 2025 11:47:45 -0500 Subject: [PATCH 10/12] Add partial generation for other fields defaulted in values.yaml --- charts/console/rendervalues.go | 4 +- charts/console/rendervalues_partial.gen.go | 76 +++--- .../v1alpha2/zz_generated.conversion.go | 238 ++++++++++++++---- 3 files changed, 228 insertions(+), 90 deletions(-) diff --git a/charts/console/rendervalues.go b/charts/console/rendervalues.go index 8477360fd..5a5f383ac 100644 --- a/charts/console/rendervalues.go +++ b/charts/console/rendervalues.go @@ -28,8 +28,8 @@ type RenderValues struct { Annotations map[string]string `json:"annotations"` PodAnnotations map[string]string `json:"podAnnotations"` PodLabels map[string]string `json:"podLabels"` - PodSecurityContext corev1.PodSecurityContext `json:"podSecurityContext"` - SecurityContext corev1.SecurityContext `json:"securityContext"` + PodSecurityContext corev1.PodSecurityContext `json:"podSecurityContext" partial:"builtin"` + SecurityContext corev1.SecurityContext `json:"securityContext" partial:"builtin"` Service ServiceConfig `json:"service"` Ingress IngressConfig `json:"ingress"` Resources corev1.ResourceRequirements `json:"resources"` diff --git a/charts/console/rendervalues_partial.gen.go b/charts/console/rendervalues_partial.gen.go index 313e5b718..0d771a02a 100644 --- a/charts/console/rendervalues_partial.gen.go +++ b/charts/console/rendervalues_partial.gen.go @@ -21,44 +21,44 @@ import ( ) type PartialRenderValues struct { - ReplicaCount *int32 "json:\"replicaCount,omitempty\"" - NameOverride *string "json:\"nameOverride,omitempty\"" - CommonLabels map[string]string "json:\"commonLabels,omitempty\"" - 
FullnameOverride *string "json:\"fullnameOverride,omitempty\"" - Image *PartialImage "json:\"image,omitempty\"" - ImagePullSecrets []corev1.LocalObjectReference "json:\"imagePullSecrets,omitempty\"" - AutomountServiceAccountToken *bool "json:\"automountServiceAccountToken,omitempty\"" - ServiceAccount *PartialServiceAccountConfig "json:\"serviceAccount,omitempty\"" - Annotations map[string]string "json:\"annotations,omitempty\"" - PodAnnotations map[string]string "json:\"podAnnotations,omitempty\"" - PodLabels map[string]string "json:\"podLabels,omitempty\"" - PodSecurityContext *corev1.PodSecurityContext "json:\"podSecurityContext,omitempty\"" - SecurityContext *corev1.SecurityContext "json:\"securityContext,omitempty\"" - Service *PartialServiceConfig "json:\"service,omitempty\"" - Ingress *PartialIngressConfig "json:\"ingress,omitempty\"" - Resources *corev1.ResourceRequirements "json:\"resources,omitempty\"" - Autoscaling *PartialAutoScaling "json:\"autoscaling,omitempty\"" - NodeSelector map[string]string "json:\"nodeSelector,omitempty\"" - Tolerations []corev1.Toleration "json:\"tolerations,omitempty\"" - Affinity *corev1.Affinity "json:\"affinity,omitempty\"" - TopologySpreadConstraints []corev1.TopologySpreadConstraint "json:\"topologySpreadConstraints,omitempty\"" - PriorityClassName *string "json:\"priorityClassName,omitempty\"" - Config map[string]any "json:\"config,omitempty\"" - ExtraEnv []corev1.EnvVar "json:\"extraEnv,omitempty\"" - ExtraEnvFrom []corev1.EnvFromSource "json:\"extraEnvFrom,omitempty\"" - ExtraVolumes []corev1.Volume "json:\"extraVolumes,omitempty\"" - ExtraVolumeMounts []corev1.VolumeMount "json:\"extraVolumeMounts,omitempty\"" - ExtraContainers []corev1.Container "json:\"extraContainers,omitempty\"" - ExtraContainerPorts []corev1.ContainerPort "json:\"extraContainerPorts,omitempty\"" - InitContainers *PartialInitContainers "json:\"initContainers,omitempty\"" - SecretMounts []PartialSecretMount "json:\"secretMounts,omitempty\"" - Secret *PartialSecretConfig "json:\"secret,omitempty\"" - LicenseSecretRef *corev1.SecretKeySelector "json:\"licenseSecretRef,omitempty\"" - LivenessProbe *applycorev1.ProbeApplyConfiguration "json:\"livenessProbe,omitempty\"" - ReadinessProbe *applycorev1.ProbeApplyConfiguration "json:\"readinessProbe,omitempty\"" - ConfigMap *PartialCreatable "json:\"configmap,omitempty\"" - Deployment *PartialDeploymentConfig "json:\"deployment,omitempty\"" - Strategy *appsv1.DeploymentStrategy "json:\"strategy,omitempty\"" + ReplicaCount *int32 "json:\"replicaCount,omitempty\"" + NameOverride *string "json:\"nameOverride,omitempty\"" + CommonLabels map[string]string "json:\"commonLabels,omitempty\"" + FullnameOverride *string "json:\"fullnameOverride,omitempty\"" + Image *PartialImage "json:\"image,omitempty\"" + ImagePullSecrets []corev1.LocalObjectReference "json:\"imagePullSecrets,omitempty\"" + AutomountServiceAccountToken *bool "json:\"automountServiceAccountToken,omitempty\"" + ServiceAccount *PartialServiceAccountConfig "json:\"serviceAccount,omitempty\"" + Annotations map[string]string "json:\"annotations,omitempty\"" + PodAnnotations map[string]string "json:\"podAnnotations,omitempty\"" + PodLabels map[string]string "json:\"podLabels,omitempty\"" + PodSecurityContext *applycorev1.PodSecurityContextApplyConfiguration "json:\"podSecurityContext,omitempty\"" + SecurityContext *applycorev1.SecurityContextApplyConfiguration "json:\"securityContext,omitempty\"" + Service *PartialServiceConfig "json:\"service,omitempty\"" + Ingress 
*PartialIngressConfig "json:\"ingress,omitempty\"" + Resources *corev1.ResourceRequirements "json:\"resources,omitempty\"" + Autoscaling *PartialAutoScaling "json:\"autoscaling,omitempty\"" + NodeSelector map[string]string "json:\"nodeSelector,omitempty\"" + Tolerations []corev1.Toleration "json:\"tolerations,omitempty\"" + Affinity *corev1.Affinity "json:\"affinity,omitempty\"" + TopologySpreadConstraints []corev1.TopologySpreadConstraint "json:\"topologySpreadConstraints,omitempty\"" + PriorityClassName *string "json:\"priorityClassName,omitempty\"" + Config map[string]any "json:\"config,omitempty\"" + ExtraEnv []corev1.EnvVar "json:\"extraEnv,omitempty\"" + ExtraEnvFrom []corev1.EnvFromSource "json:\"extraEnvFrom,omitempty\"" + ExtraVolumes []corev1.Volume "json:\"extraVolumes,omitempty\"" + ExtraVolumeMounts []corev1.VolumeMount "json:\"extraVolumeMounts,omitempty\"" + ExtraContainers []corev1.Container "json:\"extraContainers,omitempty\"" + ExtraContainerPorts []corev1.ContainerPort "json:\"extraContainerPorts,omitempty\"" + InitContainers *PartialInitContainers "json:\"initContainers,omitempty\"" + SecretMounts []PartialSecretMount "json:\"secretMounts,omitempty\"" + Secret *PartialSecretConfig "json:\"secret,omitempty\"" + LicenseSecretRef *corev1.SecretKeySelector "json:\"licenseSecretRef,omitempty\"" + LivenessProbe *applycorev1.ProbeApplyConfiguration "json:\"livenessProbe,omitempty\"" + ReadinessProbe *applycorev1.ProbeApplyConfiguration "json:\"readinessProbe,omitempty\"" + ConfigMap *PartialCreatable "json:\"configmap,omitempty\"" + Deployment *PartialDeploymentConfig "json:\"deployment,omitempty\"" + Strategy *appsv1.DeploymentStrategy "json:\"strategy,omitempty\"" } type PartialImage struct { diff --git a/operator/api/redpanda/v1alpha2/zz_generated.conversion.go b/operator/api/redpanda/v1alpha2/zz_generated.conversion.go index c374aa7d7..778ef0464 100644 --- a/operator/api/redpanda/v1alpha2/zz_generated.conversion.go +++ b/operator/api/redpanda/v1alpha2/zz_generated.conversion.go @@ -6,11 +6,12 @@ package v1alpha2 import ( v3 "github.com/redpanda-data/redpanda-operator/charts/console/v3" ir "github.com/redpanda-data/redpanda-operator/pkg/ir" - v11 "k8s.io/api/apps/v1" + v12 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - v13 "k8s.io/api/networking/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + v14 "k8s.io/api/networking/v1" + v13 "k8s.io/apimachinery/pkg/apis/meta/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" + v11 "k8s.io/client-go/applyconfigurations/core/v1" ) func init() { @@ -74,8 +75,8 @@ func init() { consolePartialRenderValues.PodLabels[key4] = value4 } } - consolePartialRenderValues.PodSecurityContext = pV1PodSecurityContextToPV1PodSecurityContext((*source).PodSecurityContext) - consolePartialRenderValues.SecurityContext = pV1SecurityContextToPV1SecurityContext((*source).SecurityContext) + consolePartialRenderValues.PodSecurityContext = pV1PodSecurityContextToPV1PodSecurityContextApplyConfiguration((*source).PodSecurityContext) + consolePartialRenderValues.SecurityContext = pV1SecurityContextToPV1SecurityContextApplyConfiguration((*source).SecurityContext) consolePartialRenderValues.Service = pV1alpha2ServiceConfigToPConsolePartialServiceConfig((*source).Service) consolePartialRenderValues.Ingress = pV1alpha2IngressConfigToPConsolePartialIngressConfig((*source).Ingress) consolePartialRenderValues.Resources = pV1ResourceRequirementsToPV1ResourceRequirements((*source).Resources) @@ -506,7 +507,7 @@ func pV1AppArmorProfileToPV1AppArmorProfile(source 
*v1.AppArmorProfile) *v1.AppA var pV1AppArmorProfile *v1.AppArmorProfile if source != nil { var v1AppArmorProfile v1.AppArmorProfile - v1AppArmorProfile.Type = v1.AppArmorProfileType((*source).Type) + v1AppArmorProfile.Type = v1AppArmorProfileTypeToV1AppArmorProfileType((*source).Type) if (*source).LocalhostProfile != nil { xstring := *(*source).LocalhostProfile v1AppArmorProfile.LocalhostProfile = &xstring @@ -515,6 +516,20 @@ func pV1AppArmorProfileToPV1AppArmorProfile(source *v1.AppArmorProfile) *v1.AppA } return pV1AppArmorProfile } +func pV1AppArmorProfileToPV1AppArmorProfileApplyConfiguration(source *v1.AppArmorProfile) *v11.AppArmorProfileApplyConfiguration { + var pV1AppArmorProfileApplyConfiguration *v11.AppArmorProfileApplyConfiguration + if source != nil { + var v1AppArmorProfileApplyConfiguration v11.AppArmorProfileApplyConfiguration + pV1AppArmorProfileType := v1AppArmorProfileTypeToV1AppArmorProfileType((*source).Type) + v1AppArmorProfileApplyConfiguration.Type = &pV1AppArmorProfileType + if (*source).LocalhostProfile != nil { + xstring := *(*source).LocalhostProfile + v1AppArmorProfileApplyConfiguration.LocalhostProfile = &xstring + } + pV1AppArmorProfileApplyConfiguration = &v1AppArmorProfileApplyConfiguration + } + return pV1AppArmorProfileApplyConfiguration +} func pV1CapabilitiesToPV1Capabilities(source *v1.Capabilities) *v1.Capabilities { var pV1Capabilities *v1.Capabilities if source != nil { @@ -535,6 +550,26 @@ func pV1CapabilitiesToPV1Capabilities(source *v1.Capabilities) *v1.Capabilities } return pV1Capabilities } +func pV1CapabilitiesToPV1CapabilitiesApplyConfiguration(source *v1.Capabilities) *v11.CapabilitiesApplyConfiguration { + var pV1CapabilitiesApplyConfiguration *v11.CapabilitiesApplyConfiguration + if source != nil { + var v1CapabilitiesApplyConfiguration v11.CapabilitiesApplyConfiguration + if (*source).Add != nil { + v1CapabilitiesApplyConfiguration.Add = make([]v1.Capability, len((*source).Add)) + for i := 0; i < len((*source).Add); i++ { + v1CapabilitiesApplyConfiguration.Add[i] = v1CapabilityToV1Capability((*source).Add[i]) + } + } + if (*source).Drop != nil { + v1CapabilitiesApplyConfiguration.Drop = make([]v1.Capability, len((*source).Drop)) + for j := 0; j < len((*source).Drop); j++ { + v1CapabilitiesApplyConfiguration.Drop[j] = v1CapabilityToV1Capability((*source).Drop[j]) + } + } + pV1CapabilitiesApplyConfiguration = &v1CapabilitiesApplyConfiguration + } + return pV1CapabilitiesApplyConfiguration +} func pV1ConfigMapEnvSourceToPV1ConfigMapEnvSource(source *v1.ConfigMapEnvSource) *v1.ConfigMapEnvSource { var pV1ConfigMapEnvSource *v1.ConfigMapEnvSource if source != nil { @@ -562,11 +597,11 @@ func pV1ConfigMapKeySelectorToPV1ConfigMapKeySelector(source *v1.ConfigMapKeySel } return pV1ConfigMapKeySelector } -func pV1DeploymentStrategyToPV1DeploymentStrategy(source *v11.DeploymentStrategy) *v11.DeploymentStrategy { - var pV1DeploymentStrategy *v11.DeploymentStrategy +func pV1DeploymentStrategyToPV1DeploymentStrategy(source *v12.DeploymentStrategy) *v12.DeploymentStrategy { + var pV1DeploymentStrategy *v12.DeploymentStrategy if source != nil { - var v1DeploymentStrategy v11.DeploymentStrategy - v1DeploymentStrategy.Type = v11.DeploymentStrategyType((*source).Type) + var v1DeploymentStrategy v12.DeploymentStrategy + v1DeploymentStrategy.Type = v12.DeploymentStrategyType((*source).Type) v1DeploymentStrategy.RollingUpdate = pV1RollingUpdateDeploymentToPV1RollingUpdateDeployment((*source).RollingUpdate) pV1DeploymentStrategy = &v1DeploymentStrategy } @@ 
-617,10 +652,10 @@ func pV1HTTPGetActionToPV1HTTPGetAction(source *v1.HTTPGetAction) *v1.HTTPGetAct } return pV1HTTPGetAction } -func pV1LabelSelectorToPV1LabelSelector(source *v12.LabelSelector) *v12.LabelSelector { - var pV1LabelSelector *v12.LabelSelector +func pV1LabelSelectorToPV1LabelSelector(source *v13.LabelSelector) *v13.LabelSelector { + var pV1LabelSelector *v13.LabelSelector if source != nil { - var v1LabelSelector v12.LabelSelector + var v1LabelSelector v13.LabelSelector if (*source).MatchLabels != nil { v1LabelSelector.MatchLabels = make(map[string]string, len((*source).MatchLabels)) for key, value := range (*source).MatchLabels { @@ -628,7 +663,7 @@ func pV1LabelSelectorToPV1LabelSelector(source *v12.LabelSelector) *v12.LabelSel } } if (*source).MatchExpressions != nil { - v1LabelSelector.MatchExpressions = make([]v12.LabelSelectorRequirement, len((*source).MatchExpressions)) + v1LabelSelector.MatchExpressions = make([]v13.LabelSelectorRequirement, len((*source).MatchExpressions)) for i := 0; i < len((*source).MatchExpressions); i++ { v1LabelSelector.MatchExpressions[i] = v1LabelSelectorRequirementToV1LabelSelectorRequirement((*source).MatchExpressions[i]) } @@ -732,57 +767,57 @@ func pV1PodAntiAffinityToPV1PodAntiAffinity(source *v1.PodAntiAffinity) *v1.PodA } return pV1PodAntiAffinity } -func pV1PodSecurityContextToPV1PodSecurityContext(source *v1.PodSecurityContext) *v1.PodSecurityContext { - var pV1PodSecurityContext *v1.PodSecurityContext +func pV1PodSecurityContextToPV1PodSecurityContextApplyConfiguration(source *v1.PodSecurityContext) *v11.PodSecurityContextApplyConfiguration { + var pV1PodSecurityContextApplyConfiguration *v11.PodSecurityContextApplyConfiguration if source != nil { - var v1PodSecurityContext v1.PodSecurityContext - v1PodSecurityContext.SELinuxOptions = pV1SELinuxOptionsToPV1SELinuxOptions((*source).SELinuxOptions) - v1PodSecurityContext.WindowsOptions = pV1WindowsSecurityContextOptionsToPV1WindowsSecurityContextOptions((*source).WindowsOptions) + var v1PodSecurityContextApplyConfiguration v11.PodSecurityContextApplyConfiguration + v1PodSecurityContextApplyConfiguration.SELinuxOptions = pV1SELinuxOptionsToPV1SELinuxOptionsApplyConfiguration((*source).SELinuxOptions) + v1PodSecurityContextApplyConfiguration.WindowsOptions = pV1WindowsSecurityContextOptionsToPV1WindowsSecurityContextOptionsApplyConfiguration((*source).WindowsOptions) if (*source).RunAsUser != nil { xint64 := *(*source).RunAsUser - v1PodSecurityContext.RunAsUser = &xint64 + v1PodSecurityContextApplyConfiguration.RunAsUser = &xint64 } if (*source).RunAsGroup != nil { xint642 := *(*source).RunAsGroup - v1PodSecurityContext.RunAsGroup = &xint642 + v1PodSecurityContextApplyConfiguration.RunAsGroup = &xint642 } if (*source).RunAsNonRoot != nil { xbool := *(*source).RunAsNonRoot - v1PodSecurityContext.RunAsNonRoot = &xbool + v1PodSecurityContextApplyConfiguration.RunAsNonRoot = &xbool } if (*source).SupplementalGroups != nil { - v1PodSecurityContext.SupplementalGroups = make([]int64, len((*source).SupplementalGroups)) + v1PodSecurityContextApplyConfiguration.SupplementalGroups = make([]int64, len((*source).SupplementalGroups)) for i := 0; i < len((*source).SupplementalGroups); i++ { - v1PodSecurityContext.SupplementalGroups[i] = (*source).SupplementalGroups[i] + v1PodSecurityContextApplyConfiguration.SupplementalGroups[i] = (*source).SupplementalGroups[i] } } if (*source).SupplementalGroupsPolicy != nil { v1SupplementalGroupsPolicy := 
v1.SupplementalGroupsPolicy(*(*source).SupplementalGroupsPolicy) - v1PodSecurityContext.SupplementalGroupsPolicy = &v1SupplementalGroupsPolicy + v1PodSecurityContextApplyConfiguration.SupplementalGroupsPolicy = &v1SupplementalGroupsPolicy } if (*source).FSGroup != nil { xint643 := *(*source).FSGroup - v1PodSecurityContext.FSGroup = &xint643 + v1PodSecurityContextApplyConfiguration.FSGroup = &xint643 } if (*source).Sysctls != nil { - v1PodSecurityContext.Sysctls = make([]v1.Sysctl, len((*source).Sysctls)) + v1PodSecurityContextApplyConfiguration.Sysctls = make([]v11.SysctlApplyConfiguration, len((*source).Sysctls)) for j := 0; j < len((*source).Sysctls); j++ { - v1PodSecurityContext.Sysctls[j] = v1SysctlToV1Sysctl((*source).Sysctls[j]) + v1PodSecurityContextApplyConfiguration.Sysctls[j] = v1SysctlToV1SysctlApplyConfiguration((*source).Sysctls[j]) } } if (*source).FSGroupChangePolicy != nil { v1PodFSGroupChangePolicy := v1.PodFSGroupChangePolicy(*(*source).FSGroupChangePolicy) - v1PodSecurityContext.FSGroupChangePolicy = &v1PodFSGroupChangePolicy + v1PodSecurityContextApplyConfiguration.FSGroupChangePolicy = &v1PodFSGroupChangePolicy } - v1PodSecurityContext.SeccompProfile = pV1SeccompProfileToPV1SeccompProfile((*source).SeccompProfile) - v1PodSecurityContext.AppArmorProfile = pV1AppArmorProfileToPV1AppArmorProfile((*source).AppArmorProfile) + v1PodSecurityContextApplyConfiguration.SeccompProfile = pV1SeccompProfileToPV1SeccompProfileApplyConfiguration((*source).SeccompProfile) + v1PodSecurityContextApplyConfiguration.AppArmorProfile = pV1AppArmorProfileToPV1AppArmorProfileApplyConfiguration((*source).AppArmorProfile) if (*source).SELinuxChangePolicy != nil { v1PodSELinuxChangePolicy := v1.PodSELinuxChangePolicy(*(*source).SELinuxChangePolicy) - v1PodSecurityContext.SELinuxChangePolicy = &v1PodSELinuxChangePolicy + v1PodSecurityContextApplyConfiguration.SELinuxChangePolicy = &v1PodSELinuxChangePolicy } - pV1PodSecurityContext = &v1PodSecurityContext + pV1PodSecurityContextApplyConfiguration = &v1PodSecurityContextApplyConfiguration } - return pV1PodSecurityContext + return pV1PodSecurityContextApplyConfiguration } func pV1ProbeToPV1Probe(source *v1.Probe) *v1.Probe { var pV1Probe *v1.Probe @@ -810,10 +845,10 @@ func pV1ResourceRequirementsToPV1ResourceRequirements(source *v1.ResourceRequire } return pV1ResourceRequirements } -func pV1RollingUpdateDeploymentToPV1RollingUpdateDeployment(source *v11.RollingUpdateDeployment) *v11.RollingUpdateDeployment { - var pV1RollingUpdateDeployment *v11.RollingUpdateDeployment +func pV1RollingUpdateDeploymentToPV1RollingUpdateDeployment(source *v12.RollingUpdateDeployment) *v12.RollingUpdateDeployment { + var pV1RollingUpdateDeployment *v12.RollingUpdateDeployment if source != nil { - var v1RollingUpdateDeployment v11.RollingUpdateDeployment + var v1RollingUpdateDeployment v12.RollingUpdateDeployment v1RollingUpdateDeployment.MaxUnavailable = pIntstrIntOrStringToPIntstrIntOrString((*source).MaxUnavailable) v1RollingUpdateDeployment.MaxSurge = pIntstrIntOrStringToPIntstrIntOrString((*source).MaxSurge) pV1RollingUpdateDeployment = &v1RollingUpdateDeployment @@ -832,11 +867,27 @@ func pV1SELinuxOptionsToPV1SELinuxOptions(source *v1.SELinuxOptions) *v1.SELinux } return pV1SELinuxOptions } +func pV1SELinuxOptionsToPV1SELinuxOptionsApplyConfiguration(source *v1.SELinuxOptions) *v11.SELinuxOptionsApplyConfiguration { + var pV1SELinuxOptionsApplyConfiguration *v11.SELinuxOptionsApplyConfiguration + if source != nil { + var v1SELinuxOptionsApplyConfiguration 
v11.SELinuxOptionsApplyConfiguration + pString := (*source).User + v1SELinuxOptionsApplyConfiguration.User = &pString + pString2 := (*source).Role + v1SELinuxOptionsApplyConfiguration.Role = &pString2 + pString3 := (*source).Type + v1SELinuxOptionsApplyConfiguration.Type = &pString3 + pString4 := (*source).Level + v1SELinuxOptionsApplyConfiguration.Level = &pString4 + pV1SELinuxOptionsApplyConfiguration = &v1SELinuxOptionsApplyConfiguration + } + return pV1SELinuxOptionsApplyConfiguration +} func pV1SeccompProfileToPV1SeccompProfile(source *v1.SeccompProfile) *v1.SeccompProfile { var pV1SeccompProfile *v1.SeccompProfile if source != nil { var v1SeccompProfile v1.SeccompProfile - v1SeccompProfile.Type = v1.SeccompProfileType((*source).Type) + v1SeccompProfile.Type = v1SeccompProfileTypeToV1SeccompProfileType((*source).Type) if (*source).LocalhostProfile != nil { xstring := *(*source).LocalhostProfile v1SeccompProfile.LocalhostProfile = &xstring @@ -845,6 +896,20 @@ func pV1SeccompProfileToPV1SeccompProfile(source *v1.SeccompProfile) *v1.Seccomp } return pV1SeccompProfile } +func pV1SeccompProfileToPV1SeccompProfileApplyConfiguration(source *v1.SeccompProfile) *v11.SeccompProfileApplyConfiguration { + var pV1SeccompProfileApplyConfiguration *v11.SeccompProfileApplyConfiguration + if source != nil { + var v1SeccompProfileApplyConfiguration v11.SeccompProfileApplyConfiguration + pV1SeccompProfileType := v1SeccompProfileTypeToV1SeccompProfileType((*source).Type) + v1SeccompProfileApplyConfiguration.Type = &pV1SeccompProfileType + if (*source).LocalhostProfile != nil { + xstring := *(*source).LocalhostProfile + v1SeccompProfileApplyConfiguration.LocalhostProfile = &xstring + } + pV1SeccompProfileApplyConfiguration = &v1SeccompProfileApplyConfiguration + } + return pV1SeccompProfileApplyConfiguration +} func pV1SecretEnvSourceToPV1SecretEnvSource(source *v1.SecretEnvSource) *v1.SecretEnvSource { var pV1SecretEnvSource *v1.SecretEnvSource if source != nil { @@ -913,6 +978,47 @@ func pV1SecurityContextToPV1SecurityContext(source *v1.SecurityContext) *v1.Secu } return pV1SecurityContext } +func pV1SecurityContextToPV1SecurityContextApplyConfiguration(source *v1.SecurityContext) *v11.SecurityContextApplyConfiguration { + var pV1SecurityContextApplyConfiguration *v11.SecurityContextApplyConfiguration + if source != nil { + var v1SecurityContextApplyConfiguration v11.SecurityContextApplyConfiguration + v1SecurityContextApplyConfiguration.Capabilities = pV1CapabilitiesToPV1CapabilitiesApplyConfiguration((*source).Capabilities) + if (*source).Privileged != nil { + xbool := *(*source).Privileged + v1SecurityContextApplyConfiguration.Privileged = &xbool + } + v1SecurityContextApplyConfiguration.SELinuxOptions = pV1SELinuxOptionsToPV1SELinuxOptionsApplyConfiguration((*source).SELinuxOptions) + v1SecurityContextApplyConfiguration.WindowsOptions = pV1WindowsSecurityContextOptionsToPV1WindowsSecurityContextOptionsApplyConfiguration((*source).WindowsOptions) + if (*source).RunAsUser != nil { + xint64 := *(*source).RunAsUser + v1SecurityContextApplyConfiguration.RunAsUser = &xint64 + } + if (*source).RunAsGroup != nil { + xint642 := *(*source).RunAsGroup + v1SecurityContextApplyConfiguration.RunAsGroup = &xint642 + } + if (*source).RunAsNonRoot != nil { + xbool2 := *(*source).RunAsNonRoot + v1SecurityContextApplyConfiguration.RunAsNonRoot = &xbool2 + } + if (*source).ReadOnlyRootFilesystem != nil { + xbool3 := *(*source).ReadOnlyRootFilesystem + v1SecurityContextApplyConfiguration.ReadOnlyRootFilesystem = 
&xbool3 + } + if (*source).AllowPrivilegeEscalation != nil { + xbool4 := *(*source).AllowPrivilegeEscalation + v1SecurityContextApplyConfiguration.AllowPrivilegeEscalation = &xbool4 + } + if (*source).ProcMount != nil { + v1ProcMountType := v1.ProcMountType(*(*source).ProcMount) + v1SecurityContextApplyConfiguration.ProcMount = &v1ProcMountType + } + v1SecurityContextApplyConfiguration.SeccompProfile = pV1SeccompProfileToPV1SeccompProfileApplyConfiguration((*source).SeccompProfile) + v1SecurityContextApplyConfiguration.AppArmorProfile = pV1AppArmorProfileToPV1AppArmorProfileApplyConfiguration((*source).AppArmorProfile) + pV1SecurityContextApplyConfiguration = &v1SecurityContextApplyConfiguration + } + return pV1SecurityContextApplyConfiguration +} func pV1SleepActionToPV1SleepAction(source *v1.SleepAction) *v1.SleepAction { var pV1SleepAction *v1.SleepAction if source != nil { @@ -956,6 +1062,30 @@ func pV1WindowsSecurityContextOptionsToPV1WindowsSecurityContextOptions(source * } return pV1WindowsSecurityContextOptions } +func pV1WindowsSecurityContextOptionsToPV1WindowsSecurityContextOptionsApplyConfiguration(source *v1.WindowsSecurityContextOptions) *v11.WindowsSecurityContextOptionsApplyConfiguration { + var pV1WindowsSecurityContextOptionsApplyConfiguration *v11.WindowsSecurityContextOptionsApplyConfiguration + if source != nil { + var v1WindowsSecurityContextOptionsApplyConfiguration v11.WindowsSecurityContextOptionsApplyConfiguration + if (*source).GMSACredentialSpecName != nil { + xstring := *(*source).GMSACredentialSpecName + v1WindowsSecurityContextOptionsApplyConfiguration.GMSACredentialSpecName = &xstring + } + if (*source).GMSACredentialSpec != nil { + xstring2 := *(*source).GMSACredentialSpec + v1WindowsSecurityContextOptionsApplyConfiguration.GMSACredentialSpec = &xstring2 + } + if (*source).RunAsUserName != nil { + xstring3 := *(*source).RunAsUserName + v1WindowsSecurityContextOptionsApplyConfiguration.RunAsUserName = &xstring3 + } + if (*source).HostProcess != nil { + xbool := *(*source).HostProcess + v1WindowsSecurityContextOptionsApplyConfiguration.HostProcess = &xbool + } + pV1WindowsSecurityContextOptionsApplyConfiguration = &v1WindowsSecurityContextOptionsApplyConfiguration + } + return pV1WindowsSecurityContextOptionsApplyConfiguration +} func pV1alpha2AuthenticationSecretsToPConsolePartialAuthenticationSecrets(source *AuthenticationSecrets) *v3.PartialAuthenticationSecrets { var pConsolePartialAuthenticationSecrets *v3.PartialAuthenticationSecrets if source != nil { @@ -1055,7 +1185,7 @@ func pV1alpha2IngressConfigToPConsolePartialIngressConfig(source *IngressConfig) } } if (*source).TLS != nil { - consolePartialIngressConfig.TLS = make([]v13.IngressTLS, len((*source).TLS)) + consolePartialIngressConfig.TLS = make([]v14.IngressTLS, len((*source).TLS)) for j := 0; j < len((*source).TLS); j++ { consolePartialIngressConfig.TLS[j] = v1IngressTLSToV1IngressTLS((*source).TLS[j]) } @@ -1211,6 +1341,9 @@ func pV1alpha2ServiceConfigToPConsolePartialServiceConfig(source *ServiceConfig) } return pConsolePartialServiceConfig } +func v1AppArmorProfileTypeToV1AppArmorProfileType(source v1.AppArmorProfileType) v1.AppArmorProfileType { + return v1.AppArmorProfileType(source) +} func v1CapabilityToV1Capability(source v1.Capability) v1.Capability { return v1.Capability(source) } @@ -1313,8 +1446,8 @@ func v1HTTPHeaderToV1HTTPHeader(source v1.HTTPHeader) v1.HTTPHeader { v1HTTPHeader.Value = source.Value return v1HTTPHeader } -func v1IngressTLSToV1IngressTLS(source v13.IngressTLS) 
v13.IngressTLS { - var v1IngressTLS v13.IngressTLS +func v1IngressTLSToV1IngressTLS(source v14.IngressTLS) v14.IngressTLS { + var v1IngressTLS v14.IngressTLS if source.Hosts != nil { v1IngressTLS.Hosts = make([]string, len(source.Hosts)) for i := 0; i < len(source.Hosts); i++ { @@ -1324,10 +1457,10 @@ func v1IngressTLSToV1IngressTLS(source v13.IngressTLS) v13.IngressTLS { v1IngressTLS.SecretName = source.SecretName return v1IngressTLS } -func v1LabelSelectorRequirementToV1LabelSelectorRequirement(source v12.LabelSelectorRequirement) v12.LabelSelectorRequirement { - var v1LabelSelectorRequirement v12.LabelSelectorRequirement +func v1LabelSelectorRequirementToV1LabelSelectorRequirement(source v13.LabelSelectorRequirement) v13.LabelSelectorRequirement { + var v1LabelSelectorRequirement v13.LabelSelectorRequirement v1LabelSelectorRequirement.Key = source.Key - v1LabelSelectorRequirement.Operator = v12.LabelSelectorOperator(source.Operator) + v1LabelSelectorRequirement.Operator = v13.LabelSelectorOperator(source.Operator) if source.Values != nil { v1LabelSelectorRequirement.Values = make([]string, len(source.Values)) for i := 0; i < len(source.Values); i++ { @@ -1411,11 +1544,16 @@ func v1ProbeHandlerToV1ProbeHandler(source v1.ProbeHandler) v1.ProbeHandler { v1ProbeHandler.GRPC = pV1GRPCActionToPV1GRPCAction(source.GRPC) return v1ProbeHandler } -func v1SysctlToV1Sysctl(source v1.Sysctl) v1.Sysctl { - var v1Sysctl v1.Sysctl - v1Sysctl.Name = source.Name - v1Sysctl.Value = source.Value - return v1Sysctl +func v1SeccompProfileTypeToV1SeccompProfileType(source v1.SeccompProfileType) v1.SeccompProfileType { + return v1.SeccompProfileType(source) +} +func v1SysctlToV1SysctlApplyConfiguration(source v1.Sysctl) v11.SysctlApplyConfiguration { + var v1SysctlApplyConfiguration v11.SysctlApplyConfiguration + pString := source.Name + v1SysctlApplyConfiguration.Name = &pString + pString2 := source.Value + v1SysctlApplyConfiguration.Value = &pString2 + return v1SysctlApplyConfiguration } func v1TolerationToV1Toleration(source v1.Toleration) v1.Toleration { var v1Toleration v1.Toleration @@ -1501,7 +1639,7 @@ func v1alpha2IngressPathToConsolePartialIngressPath(source IngressPath) v3.Parti pString := source.Path consolePartialIngressPath.Path = &pString if source.PathType != nil { - v1PathType := v13.PathType(*source.PathType) + v1PathType := v14.PathType(*source.PathType) consolePartialIngressPath.PathType = &v1PathType } return consolePartialIngressPath From 5d21433d5930ae6043ed86a1be0178ca44aad904 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Wed, 26 Nov 2025 12:07:14 -0500 Subject: [PATCH 11/12] Add changelog entry --- .changes/unreleased/operator-Deprecated-20251126-120545.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .changes/unreleased/operator-Deprecated-20251126-120545.yaml diff --git a/.changes/unreleased/operator-Deprecated-20251126-120545.yaml b/.changes/unreleased/operator-Deprecated-20251126-120545.yaml new file mode 100644 index 000000000..42aa67429 --- /dev/null +++ b/.changes/unreleased/operator-Deprecated-20251126-120545.yaml @@ -0,0 +1,4 @@ +project: operator +kind: Deprecated +body: The entirety of the `spec.clusterSpec.console` block in the Redpanda CR is now deprecated and will be removed in the future. Any Redpanda CR that contains one will automatically be migrated to a standalone Console CR with a back reference to the parent Redpanda CR. 
Note that these will *not* be automatically deleted when the `console` stanza is removed or when the parent Redpanda CR is deleted. +time: 2025-11-26T12:05:45.014863-05:00 From 0afd9f589d1d939a7d987d53d4effed62b650e75 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Mon, 1 Dec 2025 13:19:00 -0500 Subject: [PATCH 12/12] address feedback --- acceptance/features/console-upgrades.feature | 4 ++++ gen/partial/partial.go | 3 +++ operator/api/redpanda/v1alpha2/console_types.go | 14 +++++++++----- .../api/redpanda/v1alpha2/testdata/crd-docs.adoc | 6 ++++-- .../crd/bases/cluster.redpanda.com_consoles.yaml | 8 ++++---- 5 files changed, 24 insertions(+), 11 deletions(-) diff --git a/acceptance/features/console-upgrades.feature b/acceptance/features/console-upgrades.feature index 1c7b3b719..1a9c870a4 100644 --- a/acceptance/features/console-upgrades.feature +++ b/acceptance/features/console-upgrades.feature @@ -15,6 +15,10 @@ Feature: Upgrading the operator with Console installed spec: clusterSpec: console: + # Old versions have broken chart rendering for the console stanza + # unless nameOverride is set due to mapping configmap values for + # both the console deployment and redpanda statefulset to the same + # name. Setting nameOverride to "broken" works around this. nameOverride: broken tls: enabled: false diff --git a/gen/partial/partial.go b/gen/partial/partial.go index c24b87049..3f82a9570 100644 --- a/gen/partial/partial.go +++ b/gen/partial/partial.go @@ -280,6 +280,9 @@ func (g *Generator) partializeNamed(t *types.Named, tag *StructTag) types.Type { if !isPartialized { if tag != nil { for _, value := range tag.Values { + // "builtin" is used for types that have pre-defined partialized + // variants, such as types that have pre-generated + // k8s.io/client-go/applyconfigurations types. if value == "builtin" { path := t.Obj().Pkg().Path() if override, ok := packagePartials[path]; ok { diff --git a/operator/api/redpanda/v1alpha2/console_types.go b/operator/api/redpanda/v1alpha2/console_types.go index 79fd30920..1bd431350 100644 --- a/operator/api/redpanda/v1alpha2/console_types.go +++ b/operator/api/redpanda/v1alpha2/console_types.go @@ -125,10 +125,14 @@ type ConsoleValues struct { SecretMounts []SecretMount `json:"secretMounts,omitempty"` Secret SecretConfig `json:"secret,omitempty"` LicenseSecretRef *corev1.SecretKeySelector `json:"licenseSecretRef,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - Deployment *DeploymentConfig `json:"deployment,omitempty"` - Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"` + // LivenessProbe describes a health check to be performed against a container to determine whether it is + // alive. + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + // ReadinessProbe describes a health check to be performed against a container to determine whether it is + // ready to receive traffic. + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + Deployment *DeploymentConfig `json:"deployment,omitempty"` + Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"` // Warnings is a slice of human readable warnings generated by the automatic // migration of a Console V2 config to a Console V3 config. 
If warnings are // present, they will describe which fields from the original config have @@ -291,7 +295,7 @@ func ConvertConsoleSubchartToConsoleValues(src *RedpandaConsole) (*ConsoleValues out, err := autoconv_RedpandaConsole_To_ConsoleValues(src) if err != nil { - return nil, err + return nil, errors.WithStack(err) } // Extract out .Console and .Config. .Console will be migrated and then diff --git a/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc b/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc index fbd349dcb..761a45184 100644 --- a/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc +++ b/operator/api/redpanda/v1alpha2/testdata/crd-docs.adoc @@ -956,8 +956,10 @@ ConsoleCreateObj represents configuration options for creating Kubernetes object | *`secretMounts`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-secretmount[$$SecretMount$$] array__ | | | | *`secret`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-secretconfig[$$SecretConfig$$]__ | | | | *`licenseSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | | | -| *`livenessProbe`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-probeapplyconfiguration[$$ProbeApplyConfiguration$$]__ | | | -| *`readinessProbe`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-probeapplyconfiguration[$$ProbeApplyConfiguration$$]__ | | | +| *`livenessProbe`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-probeapplyconfiguration[$$ProbeApplyConfiguration$$]__ | LivenessProbe describes a health check to be performed against a container to determine whether it is + +alive. + | | +| *`readinessProbe`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-probeapplyconfiguration[$$ProbeApplyConfiguration$$]__ | ReadinessProbe describes a health check to be performed against a container to determine whether it is + +ready to receive traffic. + | | | *`deployment`* __xref:{anchor_prefix}-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-deploymentconfig[$$DeploymentConfig$$]__ | | | | *`strategy`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#deploymentstrategy-v1-apps[$$DeploymentStrategy$$]__ | | | | *`warnings`* __string array__ | Warnings is a slice of human readable warnings generated by the automatic + diff --git a/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml b/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml index 45aa23e5e..62cd08dbb 100644 --- a/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml +++ b/operator/config/crd/bases/cluster.redpanda.com_consoles.yaml @@ -6857,8 +6857,8 @@ spec: x-kubernetes-map-type: atomic livenessProbe: description: |- - ProbeApplyConfiguration is a wrapper type that allows including a partial - [corev1.Probe] in a CRD. + LivenessProbe describes a health check to be performed against a container to determine whether it is + alive. properties: exec: description: |- @@ -7195,8 +7195,8 @@ spec: type: string readinessProbe: description: |- - ProbeApplyConfiguration is a wrapper type that allows including a partial - [corev1.Probe] in a CRD. 
+ ReadinessProbe describes a health check to be performed against a container to determine whether it is + ready to receive traffic. properties: exec: description: |-