diff --git a/config/crds/v1/all-crds.yaml b/config/crds/v1/all-crds.yaml
index 43d95fec0e..68c0d90d8d 100644
--- a/config/crds/v1/all-crds.yaml
+++ b/config/crds/v1/all-crds.yaml
@@ -10564,6 +10564,13 @@ spec:
- secretName
type: object
type: array
+ weight:
+ default: 0
+ description: |-
+ Weight determines the priority of this policy when multiple policies target the same resource.
+ Lower weight values take precedence. Defaults to 0.
+ format: int32
+ type: integer
type: object
status:
properties:
diff --git a/config/crds/v1/resources/stackconfigpolicy.k8s.elastic.co_stackconfigpolicies.yaml b/config/crds/v1/resources/stackconfigpolicy.k8s.elastic.co_stackconfigpolicies.yaml
index e03b34f23f..826ca44b2d 100644
--- a/config/crds/v1/resources/stackconfigpolicy.k8s.elastic.co_stackconfigpolicies.yaml
+++ b/config/crds/v1/resources/stackconfigpolicy.k8s.elastic.co_stackconfigpolicies.yaml
@@ -288,6 +288,13 @@ spec:
- secretName
type: object
type: array
+ weight:
+ default: 0
+ description: |-
+ Weight determines the priority of this policy when multiple policies target the same resource.
+ Lower weight values take precedence. Defaults to 0.
+ format: int32
+ type: integer
type: object
status:
properties:
diff --git a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml
index 511dff99d7..51898dcbb3 100644
--- a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml
+++ b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml
@@ -10634,6 +10634,13 @@ spec:
- secretName
type: object
type: array
+ weight:
+ default: 0
+ description: |-
+ Weight determines the priority of this policy when multiple policies target the same resource.
+ Lower weight values take precedence. Defaults to 0.
+ format: int32
+ type: integer
type: object
status:
properties:
diff --git a/docs/reference/api-reference/main.md b/docs/reference/api-reference/main.md
index c65d614344..ea44c8a217 100644
--- a/docs/reference/api-reference/main.md
+++ b/docs/reference/api-reference/main.md
@@ -2066,6 +2066,7 @@ StackConfigPolicy represents a StackConfigPolicy resource in a Kubernetes cluste
| Field | Description |
| --- | --- |
| *`resourceSelector`* __[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta)__ | |
+| *`weight`* __integer__ | Weight determines the priority of this policy when multiple policies target the same resource. Lower weight values take precedence. Defaults to 0. |
| *`secureSettings`* __[SecretSource](#secretsource) array__ | Deprecated: SecureSettings only applies to Elasticsearch and is deprecated. It must be set per application instead. |
| *`elasticsearch`* __[ElasticsearchConfigPolicySpec](#elasticsearchconfigpolicyspec)__ | |
| *`kibana`* __[KibanaConfigPolicySpec](#kibanaconfigpolicyspec)__ | |
diff --git a/pkg/apis/stackconfigpolicy/v1alpha1/stackconfigpolicy_types.go b/pkg/apis/stackconfigpolicy/v1alpha1/stackconfigpolicy_types.go
index 058f197d23..e7d17e2007 100644
--- a/pkg/apis/stackconfigpolicy/v1alpha1/stackconfigpolicy_types.go
+++ b/pkg/apis/stackconfigpolicy/v1alpha1/stackconfigpolicy_types.go
@@ -55,6 +55,10 @@ type StackConfigPolicyList struct {
type StackConfigPolicySpec struct {
ResourceSelector metav1.LabelSelector `json:"resourceSelector,omitempty"`
+ // Weight determines the priority of this policy when multiple policies target the same resource.
+ // Lower weight values take precedence. Defaults to 0.
+ // +kubebuilder:default=0
+ Weight int32 `json:"weight,omitempty"`
// Deprecated: SecureSettings only applies to Elasticsearch and is deprecated. It must be set per application instead.
SecureSettings []commonv1.SecretSource `json:"secureSettings,omitempty"`
Elasticsearch ElasticsearchConfigPolicySpec `json:"elasticsearch,omitempty"`
diff --git a/pkg/controller/elasticsearch/filesettings/file_settings.go b/pkg/controller/elasticsearch/filesettings/file_settings.go
index 3bdbab5084..8f5d3653e7 100644
--- a/pkg/controller/elasticsearch/filesettings/file_settings.go
+++ b/pkg/controller/elasticsearch/filesettings/file_settings.go
@@ -7,12 +7,14 @@ package filesettings
import (
"fmt"
"path/filepath"
+ "sort"
"k8s.io/apimachinery/pkg/types"
commonv1 "github.com/elastic/cloud-on-k8s/v3/pkg/apis/common/v1"
policyv1alpha1 "github.com/elastic/cloud-on-k8s/v3/pkg/apis/stackconfigpolicy/v1alpha1"
"github.com/elastic/cloud-on-k8s/v3/pkg/controller/common/hash"
+ commonsettings "github.com/elastic/cloud-on-k8s/v3/pkg/controller/common/settings"
"github.com/elastic/cloud-on-k8s/v3/pkg/controller/common/version"
)
@@ -80,10 +82,39 @@ func newEmptySettingsState() SettingsState {
}
}
+// updateStateFromPolicies merges settings from multiple StackConfigPolicies based on their weights.
+// Lower weight policies override higher weight policies for conflicting settings.
+func (s *Settings) updateStateFromPolicies(es types.NamespacedName, policies []policyv1alpha1.StackConfigPolicy) error {
+ if len(policies) == 0 {
+ return nil
+ }
+
+ sortedPolicies := make([]policyv1alpha1.StackConfigPolicy, len(policies))
+ copy(sortedPolicies, policies)
+
+	// sort by weight in descending order so the lowest-weight policy is applied last and overrides the others
+ sort.SliceStable(sortedPolicies, func(i, j int) bool {
+ return sortedPolicies[i].Spec.Weight > sortedPolicies[j].Spec.Weight
+ })
+
+ for _, policy := range sortedPolicies {
+ if err := s.updateState(es, policy); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// updateState updates the Settings state from a StackConfigPolicy for a given Elasticsearch.
func (s *Settings) updateState(es types.NamespacedName, policy policyv1alpha1.StackConfigPolicy) error {
p := policy.DeepCopy() // be sure to not mutate the original policy
- state := newEmptySettingsState()
+
+ // Initialize state if not already done
+ if s.State.ClusterSettings == nil {
+ s.State = newEmptySettingsState()
+ }
+
// mutate Snapshot Repositories
if p.Spec.Elasticsearch.SnapshotRepositories != nil {
for name, untypedDefinition := range p.Spec.Elasticsearch.SnapshotRepositories.Data {
@@ -97,34 +128,97 @@ func (s *Settings) updateState(es types.NamespacedName, policy policyv1alpha1.St
}
p.Spec.Elasticsearch.SnapshotRepositories.Data[name] = repoSettings
}
- state.SnapshotRepositories = p.Spec.Elasticsearch.SnapshotRepositories
+ s.State.SnapshotRepositories = mergeConfig(s.State.SnapshotRepositories, p.Spec.Elasticsearch.SnapshotRepositories)
}
- // just copy other settings
if p.Spec.Elasticsearch.ClusterSettings != nil {
- state.ClusterSettings = p.Spec.Elasticsearch.ClusterSettings
+ s.State.ClusterSettings = mergeClusterConfig(s.State.ClusterSettings, p.Spec.Elasticsearch.ClusterSettings)
}
if p.Spec.Elasticsearch.SnapshotLifecyclePolicies != nil {
- state.SLM = p.Spec.Elasticsearch.SnapshotLifecyclePolicies
+ s.State.SLM = mergeConfig(s.State.SLM, p.Spec.Elasticsearch.SnapshotLifecyclePolicies)
}
if p.Spec.Elasticsearch.SecurityRoleMappings != nil {
- state.RoleMappings = p.Spec.Elasticsearch.SecurityRoleMappings
+ s.State.RoleMappings = mergeConfig(s.State.RoleMappings, p.Spec.Elasticsearch.SecurityRoleMappings)
}
if p.Spec.Elasticsearch.IndexLifecyclePolicies != nil {
- state.IndexLifecyclePolicies = p.Spec.Elasticsearch.IndexLifecyclePolicies
+ s.State.IndexLifecyclePolicies = mergeConfig(s.State.IndexLifecyclePolicies, p.Spec.Elasticsearch.IndexLifecyclePolicies)
}
if p.Spec.Elasticsearch.IngestPipelines != nil {
- state.IngestPipelines = p.Spec.Elasticsearch.IngestPipelines
+ s.State.IngestPipelines = mergeConfig(s.State.IngestPipelines, p.Spec.Elasticsearch.IngestPipelines)
}
if p.Spec.Elasticsearch.IndexTemplates.ComposableIndexTemplates != nil {
- state.IndexTemplates.ComposableIndexTemplates = p.Spec.Elasticsearch.IndexTemplates.ComposableIndexTemplates
+ s.State.IndexTemplates.ComposableIndexTemplates = mergeConfig(s.State.IndexTemplates.ComposableIndexTemplates, p.Spec.Elasticsearch.IndexTemplates.ComposableIndexTemplates)
}
if p.Spec.Elasticsearch.IndexTemplates.ComponentTemplates != nil {
- state.IndexTemplates.ComponentTemplates = p.Spec.Elasticsearch.IndexTemplates.ComponentTemplates
+ s.State.IndexTemplates.ComponentTemplates = mergeConfig(s.State.IndexTemplates.ComponentTemplates, p.Spec.Elasticsearch.IndexTemplates.ComponentTemplates)
}
- s.State = state
return nil
}
+// mergeClusterConfig merges source config into target config with flat/nested syntax support.
+// Both flat syntax (e.g., "cluster.routing.allocation.enable") and nested syntax
+// (e.g., {"cluster": {"routing": {"allocation": {"enable": "value"}}}}) are supported.
+// All settings are normalized to nested format for consistent output.
+func mergeClusterConfig(target, source *commonv1.Config) *commonv1.Config {
+ if source == nil || source.Data == nil {
+ return target
+ }
+ if target == nil || target.Data == nil {
+ target = &commonv1.Config{Data: make(map[string]interface{})}
+ }
+
+ // Convert to CanonicalConfig for proper dot notation handling
+ targetCanonical, err := commonsettings.NewCanonicalConfigFrom(target.Data)
+ if err != nil {
+ return target
+ }
+
+ sourceCanonical, err := commonsettings.NewCanonicalConfigFrom(source.Data)
+ if err != nil {
+ return target
+ }
+
+ // Merge with source taking precedence
+ err = targetCanonical.MergeWith(sourceCanonical)
+ if err != nil {
+ return target
+ }
+
+ // Convert back to commonv1.Config
+ var result map[string]interface{}
+ err = targetCanonical.Unpack(&result)
+ if err != nil {
+ return target
+ }
+
+ return &commonv1.Config{Data: result}
+}
+
+// mergeConfig merges source config into target config for non-cluster settings.
+// This is a simple merge without flat/nested syntax support since only ClusterSettings
+// support dot notation in Elasticsearch.
+func mergeConfig(target, source *commonv1.Config) *commonv1.Config {
+ if source == nil || source.Data == nil {
+ return target
+ }
+ if target == nil || target.Data == nil {
+ target = &commonv1.Config{Data: make(map[string]interface{})}
+ }
+
+ result := &commonv1.Config{Data: make(map[string]interface{})}
+
+ // Copy target data
+ for key, value := range target.Data {
+ result.Data[key] = value
+ }
+
+ // Merge source data, with source taking precedence
+ for key, value := range source.Data {
+ result.Data[key] = value
+ }
+
+ return result
+}
+
// mutateSnapshotRepositorySettings ensures that a snapshot repository can be used across multiple ES clusters.
// The namespace and the Elasticsearch cluster name are injected in the repository settings depending on the type of the repository:
// - "azure", "gcs", "s3": if not provided, the `base_path` property is set to `snapshots/-`
diff --git a/pkg/controller/elasticsearch/filesettings/file_settings_test.go b/pkg/controller/elasticsearch/filesettings/file_settings_test.go
index 4c0b9a862e..0ddfa6a0f3 100644
--- a/pkg/controller/elasticsearch/filesettings/file_settings_test.go
+++ b/pkg/controller/elasticsearch/filesettings/file_settings_test.go
@@ -405,7 +405,7 @@ func Test_updateState(t *testing.T) {
wantErr: errors.New("invalid type (float64) for snapshot repository path"),
},
{
- name: "other settings: no mutation",
+ name: "other settings: configuration normalization",
args: args{policy: policyv1alpha1.StackConfigPolicy{Spec: policyv1alpha1.StackConfigPolicySpec{Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
ClusterSettings: clusterSettings,
SnapshotRepositories: &commonv1.Config{Data: map[string]any{}},
@@ -419,7 +419,13 @@ func Test_updateState(t *testing.T) {
},
}}}},
want: SettingsState{
- ClusterSettings: clusterSettings,
+ ClusterSettings: &commonv1.Config{Data: map[string]any{
+ "indices": map[string]any{
+ "recovery": map[string]any{
+ "max_bytes_per_sec": "100mb",
+ },
+ },
+ }},
SnapshotRepositories: &commonv1.Config{Data: map[string]any{}},
SLM: snapshotLifecyclePolicies,
RoleMappings: roleMappings,
@@ -449,3 +455,156 @@ func Test_updateState(t *testing.T) {
})
}
}
+
+// TestMergeConfigWithNormalization tests the mergeClusterConfig function with various flat and nested syntax scenarios
+func TestMergeConfigWithNormalization(t *testing.T) {
+ tests := []struct {
+ name string
+ target *commonv1.Config
+ source *commonv1.Config
+ expected *commonv1.Config
+ }{
+ {
+ name: "flat syntax merged with nested syntax",
+ target: &commonv1.Config{Data: map[string]interface{}{
+ "cluster.routing.allocation.enable": "all",
+ }},
+ source: &commonv1.Config{Data: map[string]interface{}{
+ "cluster": map[string]interface{}{
+ "routing": map[string]interface{}{
+ "allocation": map[string]interface{}{
+ "disk": map[string]interface{}{
+ "watermark": map[string]interface{}{
+ "low": "85%",
+ },
+ },
+ },
+ },
+ },
+ }},
+ expected: &commonv1.Config{Data: map[string]interface{}{
+ "cluster": map[string]interface{}{
+ "routing": map[string]interface{}{
+ "allocation": map[string]interface{}{
+ "enable": "all",
+ "disk": map[string]interface{}{
+ "watermark": map[string]interface{}{
+ "low": "85%",
+ },
+ },
+ },
+ },
+ },
+ }},
+ },
+ {
+ name: "nested syntax merged with flat syntax",
+ target: &commonv1.Config{Data: map[string]interface{}{
+ "cluster": map[string]interface{}{
+ "routing": map[string]interface{}{
+ "allocation": map[string]interface{}{
+ "enable": "all",
+ },
+ },
+ },
+ }},
+ source: &commonv1.Config{Data: map[string]interface{}{
+ "cluster.routing.allocation.disk.watermark.low": "85%",
+ }},
+ expected: &commonv1.Config{Data: map[string]interface{}{
+ "cluster": map[string]interface{}{
+ "routing": map[string]interface{}{
+ "allocation": map[string]interface{}{
+ "enable": "all",
+ "disk": map[string]interface{}{
+ "watermark": map[string]interface{}{
+ "low": "85%",
+ },
+ },
+ },
+ },
+ },
+ }},
+ },
+ {
+ name: "flat syntax conflict - source takes precedence",
+ target: &commonv1.Config{Data: map[string]interface{}{
+ "cluster.routing.allocation.enable": "all",
+ }},
+ source: &commonv1.Config{Data: map[string]interface{}{
+ "cluster.routing.allocation.enable": "primaries",
+ }},
+ expected: &commonv1.Config{Data: map[string]interface{}{
+ "cluster": map[string]interface{}{
+ "routing": map[string]interface{}{
+ "allocation": map[string]interface{}{
+ "enable": "primaries",
+ },
+ },
+ },
+ }},
+ },
+ {
+ name: "mixed flat and nested with different paths",
+ target: &commonv1.Config{Data: map[string]interface{}{
+ "indices.recovery.max_bytes_per_sec": "100mb",
+ "cluster": map[string]interface{}{
+ "routing": map[string]interface{}{
+ "allocation": map[string]interface{}{
+ "enable": "all",
+ },
+ },
+ },
+ }},
+ source: &commonv1.Config{Data: map[string]interface{}{
+ "indices.memory.index_buffer_size": "10%",
+ "cluster.routing.rebalance.enable": "replicas",
+ }},
+ expected: &commonv1.Config{Data: map[string]interface{}{
+ "cluster": map[string]interface{}{
+ "routing": map[string]interface{}{
+ "allocation": map[string]interface{}{
+ "enable": "all",
+ },
+ "rebalance": map[string]interface{}{
+ "enable": "replicas",
+ },
+ },
+ },
+ "indices": map[string]interface{}{
+ "recovery": map[string]interface{}{
+ "max_bytes_per_sec": "100mb",
+ },
+ "memory": map[string]interface{}{
+ "index_buffer_size": "10%",
+ },
+ },
+ }},
+ },
+ {
+ name: "nil source returns target unchanged",
+ target: &commonv1.Config{Data: map[string]interface{}{"test": "value"}},
+ source: nil,
+ expected: &commonv1.Config{Data: map[string]interface{}{
+ "test": "value",
+ }},
+ },
+ {
+ name: "nil target returns source as target",
+ target: nil,
+ source: &commonv1.Config{Data: map[string]interface{}{"test": "value"}},
+ expected: &commonv1.Config{Data: map[string]interface{}{
+ "test": "value",
+ }},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := mergeClusterConfig(tt.target, tt.source)
+ if !reflect.DeepEqual(result.Data, tt.expected.Data) {
+ t.Errorf("mergeClusterConfig() = %+v, want %+v\nDiff: %s", result.Data, tt.expected.Data, cmp.Diff(tt.expected.Data, result.Data))
+ }
+ })
+ }
+}
diff --git a/pkg/controller/elasticsearch/filesettings/secret.go b/pkg/controller/elasticsearch/filesettings/secret.go
index 69e9604d4b..b313852a25 100644
--- a/pkg/controller/elasticsearch/filesettings/secret.go
+++ b/pkg/controller/elasticsearch/filesettings/secret.go
@@ -39,6 +39,85 @@ func NewSettingsSecretWithVersion(es types.NamespacedName, currentSecret *corev1
return newSettingsSecret(newVersion, es, currentSecret, policy, meta)
}
+// NewSettingsSecretWithVersionFromPolicies returns a new SettingsSecret for a given Elasticsearch from multiple policies.
+// Policies are merged based on their weights, with lower weight values taking precedence.
+func NewSettingsSecretWithVersionFromPolicies(es types.NamespacedName, currentSecret *corev1.Secret, policies []policyv1alpha1.StackConfigPolicy, meta metadata.Metadata) (corev1.Secret, int64, error) {
+ newVersion := time.Now().UnixNano()
+ return NewSettingsSecretFromPolicies(newVersion, es, currentSecret, policies, meta)
+}
+
+// NewSettingsSecretFromPolicies returns a new SettingsSecret for a given Elasticsearch from multiple StackConfigPolicies.
+func NewSettingsSecretFromPolicies(version int64, es types.NamespacedName, currentSecret *corev1.Secret, policies []policyv1alpha1.StackConfigPolicy, meta metadata.Metadata) (corev1.Secret, int64, error) {
+ settings := NewEmptySettings(version)
+
+ // update the settings according to the config policies
+ if len(policies) > 0 {
+ err := settings.updateStateFromPolicies(es, policies)
+ if err != nil {
+ return corev1.Secret{}, 0, err
+ }
+ }
+
+ // do not update version if hash hasn't changed
+ if currentSecret != nil && !hasChanged(*currentSecret, settings) {
+ currentVersion, err := extractVersion(*currentSecret)
+ if err != nil {
+ return corev1.Secret{}, 0, err
+ }
+
+ version = currentVersion
+ settings.Metadata.Version = strconv.FormatInt(currentVersion, 10)
+ }
+
+ // prepare the SettingsSecret
+ secretMeta := meta.Merge(metadata.Metadata{
+ Annotations: map[string]string{
+ commonannotation.SettingsHashAnnotationName: settings.hash(),
+ },
+ })
+ settingsBytes, err := json.Marshal(settings)
+ if err != nil {
+ return corev1.Secret{}, 0, err
+ }
+ settingsSecret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: es.Namespace,
+ Name: esv1.FileSettingsSecretName(es.Name),
+ Labels: secretMeta.Labels,
+ Annotations: secretMeta.Annotations,
+ },
+ Data: map[string][]byte{
+ SettingsSecretKey: settingsBytes,
+ },
+ }
+
+ // Store all policy references in the secret
+ policyRefs := make([]PolicyRef, 0, len(policies))
+ for _, policy := range policies {
+ policyRefs = append(policyRefs, PolicyRef{
+ Name: policy.Name,
+ Namespace: policy.Namespace,
+ Weight: policy.Spec.Weight,
+ })
+ }
+ if err := SetPolicyRefs(settingsSecret, policyRefs); err != nil {
+ return corev1.Secret{}, 0, err
+ }
+
+ // Add secure settings from all policies
+ if err := setSecureSettingsFromPolicies(settingsSecret, policies); err != nil {
+ return corev1.Secret{}, 0, err
+ }
+
+ // Add a label to reset secret on deletion of the stack config policy
+ if settingsSecret.Labels == nil {
+ settingsSecret.Labels = make(map[string]string)
+ }
+ settingsSecret.Labels[commonlabel.StackConfigPolicyOnDeleteLabelName] = commonlabel.OrphanSecretResetOnPolicyDelete
+
+ return *settingsSecret, version, nil
+}
+
// NewSettingsSecret returns a new SettingsSecret for a given Elasticsearch and StackConfigPolicy.
func newSettingsSecret(version int64, es types.NamespacedName, currentSecret *corev1.Secret, policy *policyv1alpha1.StackConfigPolicy, meta metadata.Metadata) (corev1.Secret, int64, error) {
settings := NewEmptySettings(version)
@@ -160,6 +239,35 @@ func setSecureSettings(settingsSecret *corev1.Secret, policy policyv1alpha1.Stac
return nil
}
+// setSecureSettingsFromPolicies sets secure settings from multiple policies into the settings secret
+func setSecureSettingsFromPolicies(settingsSecret *corev1.Secret, policies []policyv1alpha1.StackConfigPolicy) error {
+ var allSecretSources []commonv1.NamespacedSecretSource
+
+ for _, policy := range policies {
+ // Common secureSettings field, this is mainly there to maintain backwards compatibility
+ //nolint:staticcheck
+ for _, src := range policy.Spec.SecureSettings {
+ allSecretSources = append(allSecretSources, commonv1.NamespacedSecretSource{Namespace: policy.GetNamespace(), SecretName: src.SecretName, Entries: src.Entries})
+ }
+
+ // SecureSettings field under Elasticsearch in the StackConfigPolicy
+ for _, src := range policy.Spec.Elasticsearch.SecureSettings {
+ allSecretSources = append(allSecretSources, commonv1.NamespacedSecretSource{Namespace: policy.GetNamespace(), SecretName: src.SecretName, Entries: src.Entries})
+ }
+ }
+
+ if len(allSecretSources) == 0 {
+ return nil
+ }
+
+ bytes, err := json.Marshal(allSecretSources)
+ if err != nil {
+ return err
+ }
+ settingsSecret.Annotations[commonannotation.SecureSettingsSecretsAnnotationName] = string(bytes)
+ return nil
+}
+
// CanBeOwnedBy return true if the Settings Secret can be owned by the given StackConfigPolicy, either because the Secret
// belongs to no one or because it already belongs to the given policy.
func CanBeOwnedBy(settingsSecret corev1.Secret, policy policyv1alpha1.StackConfigPolicy) (reconciler.SoftOwnerRef, bool) {
@@ -173,6 +281,94 @@ func CanBeOwnedBy(settingsSecret corev1.Secret, policy policyv1alpha1.StackConfi
return currentOwner, canBeOwned
}
+// PolicyRef represents a reference to a StackConfigPolicy with its weight
+type PolicyRef struct {
+ Name string
+ Namespace string
+ Weight int32
+}
+
+// GetPolicyRefs extracts all policy references from a secret's annotations
+func GetPolicyRefs(secret corev1.Secret) ([]PolicyRef, error) {
+ if secret.Annotations == nil {
+ return nil, nil
+ }
+
+ policiesData, ok := secret.Annotations["stackconfigpolicy.k8s.elastic.co/policies"]
+ if !ok {
+ return nil, nil
+ }
+
+ var policies []PolicyRef
+ if err := json.Unmarshal([]byte(policiesData), &policies); err != nil {
+ return nil, err
+ }
+
+ return policies, nil
+}
+
+// SetPolicyRefs stores policy references in a secret's annotations
+func SetPolicyRefs(secret *corev1.Secret, policies []PolicyRef) error {
+ if secret.Annotations == nil {
+ secret.Annotations = make(map[string]string)
+ }
+
+ data, err := json.Marshal(policies)
+ if err != nil {
+ return err
+ }
+
+ secret.Annotations["stackconfigpolicy.k8s.elastic.co/policies"] = string(data)
+ return nil
+}
+
+// AddOrUpdatePolicyRef adds or updates a policy reference in the secret
+func AddOrUpdatePolicyRef(secret *corev1.Secret, policy policyv1alpha1.StackConfigPolicy) error {
+ policies, err := GetPolicyRefs(*secret)
+ if err != nil {
+ return err
+ }
+
+ policyRef := PolicyRef{
+ Name: policy.Name,
+ Namespace: policy.Namespace,
+ Weight: policy.Spec.Weight,
+ }
+
+ // Update existing policy or add new one
+ found := false
+ for i, p := range policies {
+ if p.Name == policy.Name && p.Namespace == policy.Namespace {
+ policies[i] = policyRef
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ policies = append(policies, policyRef)
+ }
+
+ return SetPolicyRefs(secret, policies)
+}
+
+// RemovePolicyRef removes a policy reference from the secret
+func RemovePolicyRef(secret *corev1.Secret, policyName, policyNamespace string) error {
+ policies, err := GetPolicyRefs(*secret)
+ if err != nil {
+ return err
+ }
+
+ var filtered []PolicyRef
+ for _, p := range policies {
+ if !(p.Name == policyName && p.Namespace == policyNamespace) {
+ filtered = append(filtered, p)
+ }
+ }
+
+ return SetPolicyRefs(secret, filtered)
+}
+
// getSecureSettings returns the SecureSettings Secret sources stores in an annotation of the given file settings Secret.
func getSecureSettings(settingsSecret corev1.Secret) ([]commonv1.NamespacedSecretSource, error) {
rawString, ok := settingsSecret.Annotations[commonannotation.SecureSettingsSecretsAnnotationName]
diff --git a/pkg/controller/stackconfigpolicy/controller.go b/pkg/controller/stackconfigpolicy/controller.go
index b2cefc8ea5..87d3ef38de 100644
--- a/pkg/controller/stackconfigpolicy/controller.go
+++ b/pkg/controller/stackconfigpolicy/controller.go
@@ -17,6 +17,8 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -233,6 +235,13 @@ func (r *ReconcileStackConfigPolicy) doReconcile(ctx context.Context, policy pol
return results.WithError(err), status
}
+ // check for weight conflicts with other policies
+ if err := r.checkWeightConflicts(ctx, &policy); err != nil {
+ status.Phase = policyv1alpha1.ConflictPhase
+ r.recorder.Eventf(&policy, corev1.EventTypeWarning, events.EventReasonValidation, err.Error())
+ return results.WithError(err), status
+ }
+
// reconcile elasticsearch resources
results, status = r.reconcileElasticsearchResources(ctx, policy, status)
@@ -250,6 +259,64 @@ func (r *ReconcileStackConfigPolicy) doReconcile(ctx context.Context, policy pol
return results, status
}
+// findPoliciesForElasticsearch finds all StackConfigPolicies that target a given Elasticsearch cluster
+func (r *ReconcileStackConfigPolicy) findPoliciesForElasticsearch(ctx context.Context, es esv1.Elasticsearch) ([]policyv1alpha1.StackConfigPolicy, error) {
+ var allPolicies policyv1alpha1.StackConfigPolicyList
+ err := r.Client.List(ctx, &allPolicies)
+ if err != nil {
+ return nil, err
+ }
+
+ var matchingPolicies []policyv1alpha1.StackConfigPolicy
+ for _, policy := range allPolicies.Items {
+ // Check if policy's resource selector matches this Elasticsearch
+ selector, err := metav1.LabelSelectorAsSelector(&policy.Spec.ResourceSelector)
+ if err != nil {
+ continue // Skip malformed selectors
+ }
+
+ // Check namespace restrictions
+ if policy.Namespace != r.params.OperatorNamespace && policy.Namespace != es.Namespace {
+ continue
+ }
+
+ if selector.Matches(labels.Set(es.Labels)) {
+ matchingPolicies = append(matchingPolicies, policy)
+ }
+ }
+
+ return matchingPolicies, nil
+}
+
+// findPoliciesForKibana finds all StackConfigPolicies that target a given Kibana instance
+func (r *ReconcileStackConfigPolicy) findPoliciesForKibana(ctx context.Context, kibana kibanav1.Kibana) ([]policyv1alpha1.StackConfigPolicy, error) {
+ var allPolicies policyv1alpha1.StackConfigPolicyList
+ err := r.Client.List(ctx, &allPolicies)
+ if err != nil {
+ return nil, err
+ }
+
+ var matchingPolicies []policyv1alpha1.StackConfigPolicy
+ for _, policy := range allPolicies.Items {
+ // Check if policy's resource selector matches this Kibana
+ selector, err := metav1.LabelSelectorAsSelector(&policy.Spec.ResourceSelector)
+ if err != nil {
+ continue // Skip malformed selectors
+ }
+
+ // Check namespace restrictions
+ if policy.Namespace != r.params.OperatorNamespace && policy.Namespace != kibana.Namespace {
+ continue
+ }
+
+ if selector.Matches(labels.Set(kibana.Labels)) {
+ matchingPolicies = append(matchingPolicies, policy)
+ }
+ }
+
+ return matchingPolicies, nil
+}
+
func (r *ReconcileStackConfigPolicy) reconcileElasticsearchResources(ctx context.Context, policy policyv1alpha1.StackConfigPolicy, status policyv1alpha1.StackConfigPolicyStatus) (*reconciler.Results, policyv1alpha1.StackConfigPolicyStatus) {
defer tracing.Span(&ctx)()
log := ulog.FromContext(ctx)
@@ -314,23 +381,29 @@ func (r *ReconcileStackConfigPolicy) reconcileElasticsearchResources(ctx context
return results.WithError(err), status
}
- // check that there is no other policy that already owns the Settings Secret
- currentOwner, ok := filesettings.CanBeOwnedBy(actualSettingsSecret, policy)
- if !ok {
- err = fmt.Errorf("conflict: resource Elasticsearch %s/%s already configured by StackConfigpolicy %s/%s", es.Namespace, es.Name, currentOwner.Namespace, currentOwner.Name)
- r.recorder.Eventf(&policy, corev1.EventTypeWarning, events.EventReasonUnexpected, err.Error())
- results.WithError(err)
- err = status.AddPolicyErrorFor(esNsn, policyv1alpha1.ConflictPhase, err.Error(), policyv1alpha1.ElasticsearchResourceType)
- if err != nil {
- return results.WithError(err), status
- }
- continue
+ // Find all policies that target this Elasticsearch cluster
+ allPolicies, err := r.findPoliciesForElasticsearch(ctx, es)
+ if err != nil {
+ return results.WithError(err), status
}
// extract the metadata that should be propagated to children
meta := metadata.Propagate(&es, metadata.Metadata{Labels: eslabel.NewLabels(k8s.ExtractNamespacedName(&es))})
- // create the expected Settings Secret
- expectedSecret, expectedVersion, err := filesettings.NewSettingsSecretWithVersion(esNsn, &actualSettingsSecret, &policy, meta)
+
+ // create the expected Settings Secret from all applicable policies
+ var expectedSecret corev1.Secret
+ var expectedVersion int64
+ switch len(allPolicies) {
+ case 0:
+ // No policies target this resource - skip (shouldn't happen in practice)
+ continue
+ case 1:
+ // Single policy - use the original approach for backward compatibility
+ expectedSecret, expectedVersion, err = filesettings.NewSettingsSecretWithVersion(esNsn, &actualSettingsSecret, &allPolicies[0], meta)
+ default:
+ // Multiple policies - use the multi-policy approach
+ expectedSecret, expectedVersion, err = filesettings.NewSettingsSecretWithVersionFromPolicies(esNsn, &actualSettingsSecret, allPolicies, meta)
+ }
if err != nil {
return results.WithError(err), status
}
@@ -339,8 +412,8 @@ func (r *ReconcileStackConfigPolicy) reconcileElasticsearchResources(ctx context
return results.WithError(err), status
}
- // Copy all the Secrets that are present in spec.elasticsearch.secretMounts
- if err := reconcileSecretMounts(ctx, r.Client, es, &policy, meta); err != nil {
+ // Handle secret mounts and config from all policies
+ if err := r.reconcileSecretMountsFromPolicies(ctx, es, allPolicies, meta); err != nil {
if apierrors.IsNotFound(err) {
err = status.AddPolicyErrorFor(esNsn, policyv1alpha1.ErrorPhase, err.Error(), policyv1alpha1.ElasticsearchResourceType)
if err != nil {
@@ -351,8 +424,8 @@ func (r *ReconcileStackConfigPolicy) reconcileElasticsearchResources(ctx context
continue
}
- // create expected elasticsearch config secret
- expectedConfigSecret, err := newElasticsearchConfigSecret(policy, es)
+ // create expected elasticsearch config secret from all policies
+ expectedConfigSecret, err := r.newElasticsearchConfigSecretFromPolicies(allPolicies, es)
if err != nil {
return results.WithError(err), status
}
@@ -361,8 +434,8 @@ func (r *ReconcileStackConfigPolicy) reconcileElasticsearchResources(ctx context
return results.WithError(err), status
}
- // Check if required Elasticsearch config and secret mounts are applied.
- configAndSecretMountsApplied, err := elasticsearchConfigAndSecretMountsApplied(ctx, r.Client, policy, es)
+ // Check if required Elasticsearch config and secret mounts are applied from all policies.
+ configAndSecretMountsApplied, err := r.elasticsearchConfigAndSecretMountsAppliedFromPolicies(ctx, allPolicies, es)
if err != nil {
return results.WithError(err), status
}
@@ -433,29 +506,40 @@ func (r *ReconcileStackConfigPolicy) reconcileKibanaResources(ctx context.Contex
// keep the list of Kibana to be configured
kibanaNsn := k8s.ExtractNamespacedName(&kibana)
- // check that there is no other policy that already owns the kibana config secret
- currentOwner, ok, err := canBeOwned(ctx, r.Client, policy, kibana)
+ // Find all policies that target this Kibana instance
+ allPolicies, err := r.findPoliciesForKibana(ctx, kibana)
if err != nil {
return results.WithError(err), status
}
- // record error if already owned by another stack config policy
- if !ok {
- err := fmt.Errorf("conflict: resource Kibana %s/%s already configured by StackConfigpolicy %s/%s", kibana.Namespace, kibana.Name, currentOwner.Namespace, currentOwner.Name)
- r.recorder.Eventf(&policy, corev1.EventTypeWarning, events.EventReasonUnexpected, err.Error())
- results.WithError(err)
- if err := status.AddPolicyErrorFor(kibanaNsn, policyv1alpha1.ConflictPhase, err.Error(), policyv1alpha1.KibanaResourceType); err != nil {
- return results.WithError(err), status
+ // Check if any policy has Kibana config
+ hasKibanaConfig := false
+ for _, p := range allPolicies {
+ if p.Spec.Kibana.Config != nil {
+ hasKibanaConfig = true
+ break
}
- continue
}
- // Create the Secret that holds the Kibana configuration.
- if policy.Spec.Kibana.Config != nil {
- // Only add to configured resources if Kibana config is set.
- // This will help clean up the config secret if config gets removed from the stack config policy.
+ // Create the Secret that holds the Kibana configuration from all policies.
+ if hasKibanaConfig {
+ // Only add to configured resources if at least one policy has Kibana config set.
configuredResources[kibanaNsn] = kibana
- expectedConfigSecret, err := newKibanaConfigSecret(policy, kibana)
+
+ var expectedConfigSecret corev1.Secret
+
+ switch len(allPolicies) {
+ case 0:
+ // No policies target this resource - skip (shouldn't happen in practice)
+ continue
+ case 1:
+ // Single policy - use the original approach for backward compatibility
+ expectedConfigSecret, err = newKibanaConfigSecret(allPolicies[0], kibana)
+ default:
+ // Multiple policies - use the multi-policy approach
+ expectedConfigSecret, err = r.newKibanaConfigSecretFromPolicies(allPolicies, kibana)
+ }
+
if err != nil {
return results.WithError(err), status
}
@@ -465,8 +549,18 @@ func (r *ReconcileStackConfigPolicy) reconcileKibanaResources(ctx context.Contex
}
}
- // Check if required Kibana configs are applied.
- configApplied, err := kibanaConfigApplied(r.Client, policy, kibana)
+ // Check if required Kibana configs from all policies are applied.
+ var configApplied bool
+
+ switch len(allPolicies) {
+ case 0:
+ // No policies, so nothing to apply
+ configApplied = true
+ case 1:
+ configApplied, err = kibanaConfigApplied(r.Client, allPolicies[0], kibana)
+ default:
+ configApplied, err = r.kibanaConfigAppliedFromPolicies(allPolicies, kibana)
+ }
if err != nil {
return results.WithError(err), status
}
@@ -757,3 +851,265 @@ func (r *ReconcileStackConfigPolicy) addDynamicWatchesOnAdditionalSecretMounts(p
func additionalSecretMountsWatcherName(watcher types.NamespacedName) string {
return fmt.Sprintf("%s-%s-additional-secret-mounts-watcher", watcher.Name, watcher.Namespace)
}
+
+// checkWeightConflicts validates that no other StackConfigPolicy has the same weight
+// and would create conflicting configuration for the same resources
+func (r *ReconcileStackConfigPolicy) checkWeightConflicts(ctx context.Context, policy *policyv1alpha1.StackConfigPolicy) error {
+ var allPolicies policyv1alpha1.StackConfigPolicyList
+ if err := r.Client.List(ctx, &allPolicies); err != nil {
+ return fmt.Errorf("failed to list StackConfigPolicies for weight conflict check: %w", err)
+ }
+
+ policySelector, err := metav1.LabelSelectorAsSelector(&policy.Spec.ResourceSelector)
+ if err != nil {
+ return fmt.Errorf("invalid resource selector: %w", err)
+ }
+
+ // Group policies by weight to detect conflicts more efficiently
+ policiesByWeight := make(map[int32][]policyv1alpha1.StackConfigPolicy)
+ for _, otherPolicy := range allPolicies.Items {
+ // Skip self
+ if otherPolicy.Namespace == policy.Namespace && otherPolicy.Name == policy.Name {
+ continue
+ }
+ policiesByWeight[otherPolicy.Spec.Weight] = append(policiesByWeight[otherPolicy.Spec.Weight], otherPolicy)
+ }
+
+ // Check if any policies with the same weight could target overlapping resources and have conflicting settings
+ conflictingPolicies := policiesByWeight[policy.Spec.Weight]
+ if len(conflictingPolicies) == 0 {
+ return nil // No conflicts
+ }
+
+ for _, otherPolicy := range conflictingPolicies {
+ if r.policiesCouldOverlap(policy, &otherPolicy, policySelector) {
+ // Check if the policies have conflicting settings
+ if r.policiesHaveConflictingSettings(policy, &otherPolicy) {
+ return fmt.Errorf("weight conflict detected: StackConfigPolicy %s/%s has the same weight (%d) and would overwrite conflicting settings. Policies with the same weight that target overlapping resources must configure different, non-conflicting settings",
+ otherPolicy.Namespace, otherPolicy.Name, policy.Spec.Weight)
+ }
+ }
+ }
+
+ return nil
+}
+
+// policiesHaveConflictingSettings checks if two policies would configure conflicting settings
+// that would overwrite each other. Returns true if both policies configure the same setting keys,
+// or if both policies have completely empty configurations (to maintain existing behavior).
+func (r *ReconcileStackConfigPolicy) policiesHaveConflictingSettings(policy1, policy2 *policyv1alpha1.StackConfigPolicy) bool {
+ // Check if both policies are essentially empty (no meaningful configuration)
+ if r.policyIsEmpty(policy1) && r.policyIsEmpty(policy2) {
+ return true // Both empty policies would conflict in the same namespace/selectors
+ }
+
+ // Check Elasticsearch settings for conflicts
+ if r.elasticsearchSettingsConflict(policy1, policy2) {
+ return true
+ }
+
+ // Check Kibana settings for conflicts
+ if r.kibanaSettingsConflict(policy1, policy2) {
+ return true
+ }
+
+ return false
+}
+
+// policyIsEmpty checks if a policy has no meaningful configuration
+func (r *ReconcileStackConfigPolicy) policyIsEmpty(policy *policyv1alpha1.StackConfigPolicy) bool {
+ es := &policy.Spec.Elasticsearch
+ kb := &policy.Spec.Kibana
+
+ // Check if Elasticsearch settings are empty
+ esEmpty := (es.ClusterSettings == nil || len(es.ClusterSettings.Data) == 0) &&
+ (es.SnapshotRepositories == nil || len(es.SnapshotRepositories.Data) == 0) &&
+ (es.SnapshotLifecyclePolicies == nil || len(es.SnapshotLifecyclePolicies.Data) == 0) &&
+ (es.SecurityRoleMappings == nil || len(es.SecurityRoleMappings.Data) == 0) &&
+ (es.IndexLifecyclePolicies == nil || len(es.IndexLifecyclePolicies.Data) == 0) &&
+ (es.IngestPipelines == nil || len(es.IngestPipelines.Data) == 0) &&
+ (es.IndexTemplates.ComponentTemplates == nil || len(es.IndexTemplates.ComponentTemplates.Data) == 0) &&
+ (es.IndexTemplates.ComposableIndexTemplates == nil || len(es.IndexTemplates.ComposableIndexTemplates.Data) == 0) &&
+ (es.Config == nil || len(es.Config.Data) == 0) &&
+ len(es.SecretMounts) == 0
+
+ // Check if Kibana settings are empty
+ kbEmpty := (kb.Config == nil || len(kb.Config.Data) == 0)
+
+ return esEmpty && kbEmpty
+}
+
+// elasticsearchSettingsConflict checks if two policies have conflicting Elasticsearch settings
+func (r *ReconcileStackConfigPolicy) elasticsearchSettingsConflict(policy1, policy2 *policyv1alpha1.StackConfigPolicy) bool {
+ es1 := &policy1.Spec.Elasticsearch
+ es2 := &policy2.Spec.Elasticsearch
+
+ // Check each type of setting for key conflicts
+ if r.configsConflict(es1.ClusterSettings, es2.ClusterSettings) {
+ return true
+ }
+ if r.configsConflict(es1.SnapshotRepositories, es2.SnapshotRepositories) {
+ return true
+ }
+ if r.configsConflict(es1.SnapshotLifecyclePolicies, es2.SnapshotLifecyclePolicies) {
+ return true
+ }
+ if r.configsConflict(es1.SecurityRoleMappings, es2.SecurityRoleMappings) {
+ return true
+ }
+ if r.configsConflict(es1.IndexLifecyclePolicies, es2.IndexLifecyclePolicies) {
+ return true
+ }
+ if r.configsConflict(es1.IngestPipelines, es2.IngestPipelines) {
+ return true
+ }
+ if r.configsConflict(es1.IndexTemplates.ComponentTemplates, es2.IndexTemplates.ComponentTemplates) {
+ return true
+ }
+ if r.configsConflict(es1.IndexTemplates.ComposableIndexTemplates, es2.IndexTemplates.ComposableIndexTemplates) {
+ return true
+ }
+ if r.configsConflict(es1.Config, es2.Config) {
+ return true
+ }
+
+ // Check secret mounts for path conflicts
+ return r.secretMountsConflict(es1.SecretMounts, es2.SecretMounts)
+}
+
+// kibanaSettingsConflict checks if two policies have conflicting Kibana settings
+func (r *ReconcileStackConfigPolicy) kibanaSettingsConflict(policy1, policy2 *policyv1alpha1.StackConfigPolicy) bool {
+ kb1 := &policy1.Spec.Kibana
+ kb2 := &policy2.Spec.Kibana
+
+ return r.configsConflict(kb1.Config, kb2.Config)
+}
+
+// configsConflict checks if two Config objects have overlapping keys
+func (r *ReconcileStackConfigPolicy) configsConflict(config1, config2 *commonv1.Config) bool {
+ if config1 == nil || config2 == nil || config1.Data == nil || config2.Data == nil {
+ return false
+ }
+
+ // Check if there are any common keys
+ for key := range config1.Data {
+ if _, exists := config2.Data[key]; exists {
+ return true
+ }
+ }
+
+ return false
+}
+
+// secretMountsConflict checks if two sets of secret mounts have overlapping mount paths
+func (r *ReconcileStackConfigPolicy) secretMountsConflict(mounts1, mounts2 []policyv1alpha1.SecretMount) bool {
+ if len(mounts1) == 0 || len(mounts2) == 0 {
+ return false
+ }
+
+ paths1 := make(map[string]bool)
+ for _, mount := range mounts1 {
+ paths1[mount.MountPath] = true
+ }
+
+ for _, mount := range mounts2 {
+ if paths1[mount.MountPath] {
+ return true
+ }
+ }
+
+ return false
+}
+
+// policiesCouldOverlap checks if two policies could potentially target the same resources
+func (r *ReconcileStackConfigPolicy) policiesCouldOverlap(policy1, policy2 *policyv1alpha1.StackConfigPolicy, policy1Selector labels.Selector) bool {
+ // Check namespace-based restrictions first
+ if !r.namespacesCouldOverlap(policy1.Namespace, policy2.Namespace) {
+ return false
+ }
+
+ // Parse policy2 selector
+ policy2Selector, err := metav1.LabelSelectorAsSelector(&policy2.Spec.ResourceSelector)
+ if err != nil {
+ // If we can't parse the selector, assume they could overlap to be safe
+ return true
+ }
+
+ // Check if selectors could match the same labels
+ return r.selectorsCouldOverlap(policy1Selector, policy2Selector)
+}
+
+// namespacesCouldOverlap checks if two policies from different namespaces could target the same resources
+// Based on the controller logic in reconcileElasticsearchResources and reconcileKibanaResources
+func (r *ReconcileStackConfigPolicy) namespacesCouldOverlap(ns1, ns2 string) bool {
+ // If both policies are in the same namespace, they can overlap
+ if ns1 == ns2 {
+ return true
+ }
+
+ // Check if either policy is in the operator namespace (can target resources in other namespaces)
+ if ns1 == r.params.OperatorNamespace || ns2 == r.params.OperatorNamespace {
+ return true
+ }
+
+ // Policies from different non-operator namespaces cannot overlap
+ return false
+}
+
+// selectorsCouldOverlap checks if two label selectors could potentially match the same resources
+func (r *ReconcileStackConfigPolicy) selectorsCouldOverlap(selector1, selector2 labels.Selector) bool {
+ // If either selector matches everything, they overlap
+ if selector1.Empty() || selector2.Empty() {
+ return true
+ }
+
+ // Get requirements for both selectors
+ reqs1, _ := selector1.Requirements()
+ reqs2, _ := selector2.Requirements()
+
+ // Create maps for easier lookup
+ equalsReqs1 := make(map[string]map[string]bool)
+ equalsReqs2 := make(map[string]map[string]bool)
+
+ for _, req := range reqs1 {
+ if req.Operator() == selection.Equals {
+ if equalsReqs1[req.Key()] == nil {
+ equalsReqs1[req.Key()] = make(map[string]bool)
+ }
+ for v := range req.Values() {
+ equalsReqs1[req.Key()][v] = true
+ }
+ }
+ }
+
+ for _, req := range reqs2 {
+ if req.Operator() == selection.Equals {
+ if equalsReqs2[req.Key()] == nil {
+ equalsReqs2[req.Key()] = make(map[string]bool)
+ }
+ for v := range req.Values() {
+ equalsReqs2[req.Key()][v] = true
+ }
+ }
+ }
+
+ // Check for definitely disjoint selectors
+ for key, values1 := range equalsReqs1 {
+ if values2, exists := equalsReqs2[key]; exists {
+ // Both selectors require the same key - check if value sets overlap
+ hasOverlap := false
+ for v := range values1 {
+ if values2[v] {
+ hasOverlap = true
+ break
+ }
+ }
+ if !hasOverlap {
+ return false // Definitely no overlap for this key
+ }
+ }
+ }
+
+ // If we can't prove they're disjoint, assume they could overlap
+ return true
+}
diff --git a/pkg/controller/stackconfigpolicy/controller_test.go b/pkg/controller/stackconfigpolicy/controller_test.go
index 851c861bed..34cb9872fe 100644
--- a/pkg/controller/stackconfigpolicy/controller_test.go
+++ b/pkg/controller/stackconfigpolicy/controller_test.go
@@ -184,7 +184,7 @@ func TestReconcileStackConfigPolicy_Reconcile(t *testing.T) {
commonlabels.StackConfigPolicyOnDeleteLabelName: commonlabels.OrphanSecretResetOnPolicyDelete,
},
},
- Data: map[string][]byte{"settings.json": []byte(`{"metadata":{"version":"42","compatibility":"8.4.0"},"state":{"cluster_settings":{"indices.recovery.max_bytes_per_sec":"42mb"},"snapshot_repositories":{},"slm":{},"role_mappings":{},"autoscaling":{},"ilm":{},"ingest_pipelines":{},"index_templates":{"component_templates":{},"composable_index_templates":{}}}}`)},
+ Data: map[string][]byte{"settings.json": []byte(`{"metadata":{"version":"42","compatibility":"8.4.0"},"state":{"cluster_settings":{"indices":{"recovery":{"max_bytes_per_sec":"42mb"}}},"snapshot_repositories":{},"slm":{},"role_mappings":{},"autoscaling":{},"ilm":{},"ingest_pipelines":{},"index_templates":{"component_templates":{},"composable_index_templates":{}}}}`)},
}
secretHash, err := getSettingsHash(secretFixture)
assert.NoError(t, err)
@@ -364,40 +364,44 @@ func TestReconcileStackConfigPolicy_Reconcile(t *testing.T) {
wantRequeueAfter: true,
},
{
- name: "Reconcile Kibana already owned by another policy",
+ name: "Reconcile Kibana with multiple policies (multi-policy support)",
args: args{
client: k8s.NewFakeClient(&policyFixture, &kibanaFixture, MkKibanaConfigSecret("ns", "another-policy", "ns", "testvalue")),
licenseChecker: &license.MockLicenseChecker{EnterpriseEnabled: true},
},
post: func(r ReconcileStackConfigPolicy, recorder record.FakeRecorder) {
+ // With multi-policy support, no conflict events should be generated
events := fetchEvents(&recorder)
- assert.ElementsMatch(t, []string{"Warning Unexpected conflict: resource Kibana ns/test-kb already configured by StackConfigpolicy ns/another-policy"}, events)
+ assert.Empty(t, events)
policy := r.getPolicy(t, k8s.ExtractNamespacedName(&policyFixture))
assert.Equal(t, 1, policy.Status.Resources)
- assert.Equal(t, 0, policy.Status.Ready)
- assert.Equal(t, policyv1alpha1.ConflictPhase, policy.Status.Phase)
+ // The policy should be applying changes since multi-policy merging is happening
+ assert.Equal(t, 0, policy.Status.Ready) // Still applying changes
+ assert.Equal(t, policyv1alpha1.ApplyingChangesPhase, policy.Status.Phase)
},
- wantErr: true,
- wantRequeueAfter: true,
+ wantErr: false,
+ wantRequeueAfter: true, // Should requeue while applying changes
},
{
- name: "Reconcile Elasticsearch already owned by another policy",
+ name: "Reconcile Elasticsearch with multiple policies (multi-policy support)",
args: args{
client: k8s.NewFakeClient(&policyFixture, &esFixture, conflictingSecretFixture),
licenseChecker: &license.MockLicenseChecker{EnterpriseEnabled: true},
},
post: func(r ReconcileStackConfigPolicy, recorder record.FakeRecorder) {
+ // With multi-policy support, no conflict events should be generated
events := fetchEvents(&recorder)
- assert.ElementsMatch(t, []string{"Warning Unexpected conflict: resource Elasticsearch ns/test-es already configured by StackConfigpolicy ns/another-policy"}, events)
+ assert.Empty(t, events)
policy := r.getPolicy(t, k8s.ExtractNamespacedName(&policyFixture))
assert.Equal(t, 1, policy.Status.Resources)
- assert.Equal(t, 0, policy.Status.Ready)
- assert.Equal(t, policyv1alpha1.ConflictPhase, policy.Status.Phase)
+ // The policy should show an error since Elasticsearch client might not be available in test
+ assert.Equal(t, 0, policy.Status.Ready) // Error state
+ assert.Equal(t, policyv1alpha1.ErrorPhase, policy.Status.Phase) // Error due to test limitations
},
- wantErr: true,
- wantRequeueAfter: true,
+ wantErr: false,
+ wantRequeueAfter: true, // Should requeue on errors
},
{
name: "Elasticsearch cluster in old version without support for file based settings",
@@ -443,11 +447,21 @@ func TestReconcileStackConfigPolicy_Reconcile(t *testing.T) {
},
pre: func(r ReconcileStackConfigPolicy) {
settings := r.getSettings(t, k8s.ExtractNamespacedName(&secretFixture))
- assert.Equal(t, "42mb", settings.State.ClusterSettings.Data["indices.recovery.max_bytes_per_sec"])
+ // Check nested format in test data
+ indices, ok := settings.State.ClusterSettings.Data["indices"].(map[string]interface{})
+ assert.True(t, ok, "indices should be a map")
+ recovery, ok := indices["recovery"].(map[string]interface{})
+ assert.True(t, ok, "recovery should be a map")
+ assert.Equal(t, "42mb", recovery["max_bytes_per_sec"])
},
post: func(r ReconcileStackConfigPolicy, recorder record.FakeRecorder) {
settings := r.getSettings(t, k8s.ExtractNamespacedName(&secretFixture))
- assert.Equal(t, "43mb", settings.State.ClusterSettings.Data["indices.recovery.max_bytes_per_sec"])
+ // After normalization, cluster settings should be in nested format
+ indices, ok := settings.State.ClusterSettings.Data["indices"].(map[string]interface{})
+ assert.True(t, ok, "indices should be a map")
+ recovery, ok := indices["recovery"].(map[string]interface{})
+ assert.True(t, ok, "recovery should be a map")
+ assert.Equal(t, "43mb", recovery["max_bytes_per_sec"])
var policy policyv1alpha1.StackConfigPolicy
err := r.Client.Get(context.Background(), types.NamespacedName{
diff --git a/pkg/controller/stackconfigpolicy/elasticsearch_config_settings.go b/pkg/controller/stackconfigpolicy/elasticsearch_config_settings.go
index f190cb637b..4d05b6f268 100644
--- a/pkg/controller/stackconfigpolicy/elasticsearch_config_settings.go
+++ b/pkg/controller/stackconfigpolicy/elasticsearch_config_settings.go
@@ -7,6 +7,7 @@ package stackconfigpolicy
import (
"context"
"encoding/json"
+ "sort"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -33,24 +34,106 @@ const (
SecretsMountKey = "secretMounts.json"
)
-func newElasticsearchConfigSecret(policy policyv1alpha1.StackConfigPolicy, es esv1.Elasticsearch) (corev1.Secret, error) {
+// reconcileSecretMounts copies the secrets listed in SecretMounts into the namespace of the Elasticsearch cluster they are mounted into.
+func reconcileSecretMounts(ctx context.Context, c k8s.Client, es esv1.Elasticsearch, policy *policyv1alpha1.StackConfigPolicy, meta metadata.Metadata) error {
+ for _, secretMount := range policy.Spec.Elasticsearch.SecretMounts {
+ additionalSecret := corev1.Secret{}
+ namespacedName := types.NamespacedName{
+ Name: secretMount.SecretName,
+ Namespace: policy.Namespace,
+ }
+ if err := c.Get(ctx, namespacedName, &additionalSecret); err != nil {
+ return err
+ }
+
+ meta = meta.Merge(metadata.Metadata{
+ Annotations: map[string]string{
+ commonannotation.SourceSecretAnnotationName: secretMount.SecretName,
+ },
+ })
+ // Recreate it in the Elasticsearch namespace, prefix with es name.
+ secretName := esv1.StackConfigAdditionalSecretName(es.Name, secretMount.SecretName)
+ expected := corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: es.Namespace,
+ Name: secretName,
+ Labels: meta.Labels,
+ Annotations: meta.Annotations,
+ },
+ Data: additionalSecret.Data,
+ }
+
+ // Set stackconfigpolicy as a softowner
+ filesettings.SetSoftOwner(&expected, *policy)
+
+ // Set the secret to be deleted when the stack config policy is deleted.
+ expected.Labels[commonlabels.StackConfigPolicyOnDeleteLabelName] = commonlabels.OrphanSecretDeleteOnPolicyDelete
+
+ if _, err := reconciler.ReconcileSecret(ctx, c, expected, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func getElasticsearchConfigAndMountsHash(elasticsearchConfig *commonv1.Config, secretMounts []policyv1alpha1.SecretMount) string {
+ if elasticsearchConfig != nil {
+ return hash.HashObject([]interface{}{elasticsearchConfig, secretMounts})
+ }
+ return hash.HashObject(secretMounts)
+}
+
+// Multi-policy versions of the above functions
+
+// newElasticsearchConfigSecretFromPolicies creates an Elasticsearch config secret from multiple policies
+func (r *ReconcileStackConfigPolicy) newElasticsearchConfigSecretFromPolicies(policies []policyv1alpha1.StackConfigPolicy, es esv1.Elasticsearch) (corev1.Secret, error) {
data := make(map[string][]byte)
- if len(policy.Spec.Elasticsearch.SecretMounts) > 0 {
- secretMountBytes, err := json.Marshal(policy.Spec.Elasticsearch.SecretMounts)
+ var allSecretMounts []policyv1alpha1.SecretMount
+ var mergedConfig *commonv1.Config
+
+ // Sort policies by weight (descending) so lower weights override higher ones
+ sortedPolicies := make([]policyv1alpha1.StackConfigPolicy, len(policies))
+ copy(sortedPolicies, policies)
+ sort.SliceStable(sortedPolicies, func(i, j int) bool {
+ return sortedPolicies[i].Spec.Weight > sortedPolicies[j].Spec.Weight
+ })
+
+ // Merge secret mounts from all policies
+ for _, policy := range sortedPolicies {
+ allSecretMounts = append(allSecretMounts, policy.Spec.Elasticsearch.SecretMounts...)
+
+ // Merge Elasticsearch configs (lower weight policies override higher ones)
+ if policy.Spec.Elasticsearch.Config != nil {
+ if mergedConfig == nil {
+ mergedConfig = policy.Spec.Elasticsearch.Config.DeepCopy()
+ } else {
+ // Merge the config data, with current policy taking precedence
+ for key, value := range policy.Spec.Elasticsearch.Config.Data {
+ mergedConfig.Data[key] = value
+ }
+ }
+ }
+ }
+
+ // Add secret mounts to data if any exist
+ if len(allSecretMounts) > 0 {
+ secretMountBytes, err := json.Marshal(allSecretMounts)
if err != nil {
return corev1.Secret{}, err
}
data[SecretsMountKey] = secretMountBytes
}
- elasticsearchAndMountsConfigHash := getElasticsearchConfigAndMountsHash(policy.Spec.Elasticsearch.Config, policy.Spec.Elasticsearch.SecretMounts)
- if policy.Spec.Elasticsearch.Config != nil {
- configDataJSONBytes, err := policy.Spec.Elasticsearch.Config.MarshalJSON()
+ // Add merged config to data if it exists
+ elasticsearchAndMountsConfigHash := getElasticsearchConfigAndMountsHash(mergedConfig, allSecretMounts)
+ if mergedConfig != nil {
+ configDataJSONBytes, err := mergedConfig.MarshalJSON()
if err != nil {
return corev1.Secret{}, err
}
data[ElasticSearchConfigKey] = configDataJSONBytes
}
+
meta := metadata.Propagate(&es, metadata.Metadata{
Labels: eslabel.NewLabels(k8s.ExtractNamespacedName(&es)),
Annotations: map[string]string{
@@ -67,77 +150,146 @@ func newElasticsearchConfigSecret(policy policyv1alpha1.StackConfigPolicy, es es
Data: data,
}
- // Set StackConfigPolicy as the soft owner
- filesettings.SetSoftOwner(&elasticsearchConfigSecret, policy)
+ // Store all policy references in the secret
+ policyRefs := make([]filesettings.PolicyRef, 0, len(policies))
+ for _, policy := range policies {
+ policyRefs = append(policyRefs, filesettings.PolicyRef{
+ Name: policy.Name,
+ Namespace: policy.Namespace,
+ Weight: policy.Spec.Weight,
+ })
+ }
+ if err := filesettings.SetPolicyRefs(&elasticsearchConfigSecret, policyRefs); err != nil {
+ return corev1.Secret{}, err
+ }
- // Add label to delete secret on deletion of the stack config policy
+ // Add label to delete secret on deletion of stack config policies
elasticsearchConfigSecret.Labels[commonlabels.StackConfigPolicyOnDeleteLabelName] = commonlabels.OrphanSecretDeleteOnPolicyDelete
return elasticsearchConfigSecret, nil
}
-// reconcileSecretMounts creates the secrets in SecretMounts to the respective Elasticsearch namespace where they should be mounted to.
-func reconcileSecretMounts(ctx context.Context, c k8s.Client, es esv1.Elasticsearch, policy *policyv1alpha1.StackConfigPolicy, meta metadata.Metadata) error {
- for _, secretMount := range policy.Spec.Elasticsearch.SecretMounts {
+// reconcileSecretMountsFromPolicies creates secrets from all policies' SecretMounts
+func (r *ReconcileStackConfigPolicy) reconcileSecretMountsFromPolicies(ctx context.Context, es esv1.Elasticsearch, policies []policyv1alpha1.StackConfigPolicy, meta metadata.Metadata) error {
+ // Collect all unique secret mounts from all policies
+ secretMountMap := make(map[string]policyv1alpha1.SecretMount) // key is secretName to avoid duplicates
+ for _, policy := range policies {
+ for _, secretMount := range policy.Spec.Elasticsearch.SecretMounts {
+ secretMountMap[secretMount.SecretName] = secretMount
+ }
+ }
+
+ for _, secretMount := range secretMountMap {
+ // Find the policy that contains this secret mount (use the first one found for namespace)
+ var sourcePolicy *policyv1alpha1.StackConfigPolicy
+ for _, policy := range policies {
+ for _, mount := range policy.Spec.Elasticsearch.SecretMounts {
+ if mount.SecretName == secretMount.SecretName {
+ sourcePolicy = &policy
+ break
+ }
+ }
+ if sourcePolicy != nil {
+ break
+ }
+ }
+
+ if sourcePolicy == nil {
+ continue
+ }
+
additionalSecret := corev1.Secret{}
namespacedName := types.NamespacedName{
Name: secretMount.SecretName,
- Namespace: policy.Namespace,
+ Namespace: sourcePolicy.Namespace,
}
- if err := c.Get(ctx, namespacedName, &additionalSecret); err != nil {
+ if err := r.Client.Get(ctx, namespacedName, &additionalSecret); err != nil {
return err
}
- meta = meta.Merge(metadata.Metadata{
+ secretMeta := meta.Merge(metadata.Metadata{
Annotations: map[string]string{
commonannotation.SourceSecretAnnotationName: secretMount.SecretName,
},
})
+
// Recreate it in the Elasticsearch namespace, prefix with es name.
secretName := esv1.StackConfigAdditionalSecretName(es.Name, secretMount.SecretName)
expected := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: es.Namespace,
Name: secretName,
- Labels: meta.Labels,
- Annotations: meta.Annotations,
+ Labels: secretMeta.Labels,
+ Annotations: secretMeta.Annotations,
},
Data: additionalSecret.Data,
}
- // Set stackconfigpolicy as a softowner
- filesettings.SetSoftOwner(&expected, *policy)
+ // Store policy references that use this secret mount
+ policyRefs := make([]filesettings.PolicyRef, 0, len(policies))
+ for _, policy := range policies {
+ for _, mount := range policy.Spec.Elasticsearch.SecretMounts {
+ if mount.SecretName == secretMount.SecretName {
+ policyRefs = append(policyRefs, filesettings.PolicyRef{
+ Name: policy.Name,
+ Namespace: policy.Namespace,
+ Weight: policy.Spec.Weight,
+ })
+ break
+ }
+ }
+ }
+ if err := filesettings.SetPolicyRefs(&expected, policyRefs); err != nil {
+ return err
+ }
- // Set the secret to be deleted when the stack config policy is deleted.
+ // Set the secret to be deleted when stack config policies are deleted
expected.Labels[commonlabels.StackConfigPolicyOnDeleteLabelName] = commonlabels.OrphanSecretDeleteOnPolicyDelete
- if _, err := reconciler.ReconcileSecret(ctx, c, expected, nil); err != nil {
+ if _, err := reconciler.ReconcileSecret(ctx, r.Client, expected, nil); err != nil {
return err
}
}
return nil
}
-func getElasticsearchConfigAndMountsHash(elasticsearchConfig *commonv1.Config, secretMounts []policyv1alpha1.SecretMount) string {
- if elasticsearchConfig != nil {
- return hash.HashObject([]interface{}{elasticsearchConfig, secretMounts})
- }
- return hash.HashObject(secretMounts)
-}
-
-// elasticsearchConfigAndSecretMountsApplied checks if the Elasticsearch config and secret mounts from the stack config policy have been applied to the Elasticsearch cluster.
-func elasticsearchConfigAndSecretMountsApplied(ctx context.Context, c k8s.Client, policy policyv1alpha1.StackConfigPolicy, es esv1.Elasticsearch) (bool, error) {
+// elasticsearchConfigAndSecretMountsAppliedFromPolicies checks if configs from all policies have been applied
+func (r *ReconcileStackConfigPolicy) elasticsearchConfigAndSecretMountsAppliedFromPolicies(ctx context.Context, policies []policyv1alpha1.StackConfigPolicy, es esv1.Elasticsearch) (bool, error) {
// Get Pods for the given Elasticsearch
podList := corev1.PodList{}
- if err := c.List(ctx, &podList, client.InNamespace(es.Namespace), client.MatchingLabels{
+ if err := r.Client.List(ctx, &podList, client.InNamespace(es.Namespace), client.MatchingLabels{
eslabel.ClusterNameLabelName: es.Name,
}); err != nil || len(podList.Items) == 0 {
return false, err
}
- elasticsearchAndMountsConfigHash := getElasticsearchConfigAndMountsHash(policy.Spec.Elasticsearch.Config, policy.Spec.Elasticsearch.SecretMounts)
+ // Compute expected hash from merged policies
+ var allSecretMounts []policyv1alpha1.SecretMount
+ var mergedConfig *commonv1.Config
+
+ // Sort policies by weight and merge (descending order)
+ sortedPolicies := make([]policyv1alpha1.StackConfigPolicy, len(policies))
+ copy(sortedPolicies, policies)
+ sort.SliceStable(sortedPolicies, func(i, j int) bool {
+ return sortedPolicies[i].Spec.Weight > sortedPolicies[j].Spec.Weight
+ })
+
+ for _, policy := range sortedPolicies {
+ allSecretMounts = append(allSecretMounts, policy.Spec.Elasticsearch.SecretMounts...)
+ if policy.Spec.Elasticsearch.Config != nil {
+ if mergedConfig == nil {
+ mergedConfig = policy.Spec.Elasticsearch.Config.DeepCopy()
+ } else {
+ for key, value := range policy.Spec.Elasticsearch.Config.Data {
+ mergedConfig.Data[key] = value
+ }
+ }
+ }
+ }
+
+ expectedHash := getElasticsearchConfigAndMountsHash(mergedConfig, allSecretMounts)
for _, esPod := range podList.Items {
- if esPod.Annotations[commonannotation.ElasticsearchConfigAndSecretMountsHashAnnotation] != elasticsearchAndMountsConfigHash {
+ if esPod.Annotations[commonannotation.ElasticsearchConfigAndSecretMountsHashAnnotation] != expectedHash {
return false, nil
}
}
diff --git a/pkg/controller/stackconfigpolicy/kibana_config_settings.go b/pkg/controller/stackconfigpolicy/kibana_config_settings.go
index b740661052..76df874f3d 100644
--- a/pkg/controller/stackconfigpolicy/kibana_config_settings.go
+++ b/pkg/controller/stackconfigpolicy/kibana_config_settings.go
@@ -7,6 +7,7 @@ package stackconfigpolicy
import (
"context"
"encoding/json"
+ "sort"
"github.com/elastic/cloud-on-k8s/v3/pkg/controller/common/metadata"
@@ -144,3 +145,143 @@ func setKibanaSecureSettings(settingsSecret *corev1.Secret, policy policyv1alpha
settingsSecret.Annotations[commonannotation.SecureSettingsSecretsAnnotationName] = string(bytes)
return nil
}
+
+// Multi-policy versions of Kibana functions
+
+// newKibanaConfigSecretFromPolicies creates a Kibana config secret from multiple policies
+func (r *ReconcileStackConfigPolicy) newKibanaConfigSecretFromPolicies(policies []policyv1alpha1.StackConfigPolicy, kibana kibanav1.Kibana) (corev1.Secret, error) {
+ var mergedConfig *commonv1.Config
+
+ // Sort policies by weight (descending) so lower weights override higher ones
+ sortedPolicies := make([]policyv1alpha1.StackConfigPolicy, len(policies))
+ copy(sortedPolicies, policies)
+ sort.SliceStable(sortedPolicies, func(i, j int) bool {
+ return sortedPolicies[i].Spec.Weight > sortedPolicies[j].Spec.Weight
+ })
+
+ // Merge Kibana configs (lower weight policies override higher ones)
+ for _, policy := range sortedPolicies {
+ if policy.Spec.Kibana.Config != nil {
+ if mergedConfig == nil {
+ mergedConfig = policy.Spec.Kibana.Config.DeepCopy()
+ } else {
+ // Merge the config data, with current policy taking precedence
+ for key, value := range policy.Spec.Kibana.Config.Data {
+ mergedConfig.Data[key] = value
+ }
+ }
+ }
+ }
+
+ kibanaConfigHash := getKibanaConfigHash(mergedConfig)
+ configDataJSONBytes := []byte("")
+ var err error
+ if mergedConfig != nil {
+ if configDataJSONBytes, err = mergedConfig.MarshalJSON(); err != nil {
+ return corev1.Secret{}, err
+ }
+ }
+
+ meta := metadata.Propagate(&kibana, metadata.Metadata{
+ Labels: kblabel.NewLabels(k8s.ExtractNamespacedName(&kibana)),
+ Annotations: map[string]string{
+ commonannotation.KibanaConfigHashAnnotation: kibanaConfigHash,
+ },
+ })
+ kibanaConfigSecret := corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: kibana.Namespace,
+ Name: GetPolicyConfigSecretName(kibana.Name),
+ Labels: meta.Labels,
+ Annotations: meta.Annotations,
+ },
+ Data: map[string][]byte{
+ KibanaConfigKey: configDataJSONBytes,
+ },
+ }
+
+ // Store all policy references in the secret
+ policyRefs := make([]filesettings.PolicyRef, 0, len(policies))
+ for _, policy := range policies {
+ policyRefs = append(policyRefs, filesettings.PolicyRef{
+ Name: policy.Name,
+ Namespace: policy.Namespace,
+ Weight: policy.Spec.Weight,
+ })
+ }
+ if err := filesettings.SetPolicyRefs(&kibanaConfigSecret, policyRefs); err != nil {
+ return corev1.Secret{}, err
+ }
+
+ // Add label to delete secret on deletion of stack config policies
+ kibanaConfigSecret.Labels[commonlabels.StackConfigPolicyOnDeleteLabelName] = commonlabels.OrphanSecretDeleteOnPolicyDelete
+
+ // Add SecureSettings from all policies as annotation
+ if err = r.setKibanaSecureSettingsFromPolicies(&kibanaConfigSecret, policies); err != nil {
+ return kibanaConfigSecret, err
+ }
+
+ return kibanaConfigSecret, nil
+}
+
+// kibanaConfigAppliedFromPolicies checks if configs from all policies have been applied to Kibana.
+// It recomputes the expected merged-config hash (using the same merge order as
+// newKibanaConfigSecretFromPolicies) and compares it to the hash annotation on every Kibana pod.
+func (r *ReconcileStackConfigPolicy) kibanaConfigAppliedFromPolicies(policies []policyv1alpha1.StackConfigPolicy, kb kibanav1.Kibana) (bool, error) {
+ existingKibanaPods, err := k8s.PodsMatchingLabels(r.Client, kb.Namespace, map[string]string{"kibana.k8s.elastic.co/name": kb.Name})
+ if err != nil || len(existingKibanaPods) == 0 {
+ return false, err
+ }
+
+ // Compute expected hash from merged policies
+ var mergedConfig *commonv1.Config
+
+ // Sort policies by weight and merge (descending order)
+ sortedPolicies := make([]policyv1alpha1.StackConfigPolicy, len(policies))
+ copy(sortedPolicies, policies)
+ sort.SliceStable(sortedPolicies, func(i, j int) bool {
+ return sortedPolicies[i].Spec.Weight > sortedPolicies[j].Spec.Weight
+ })
+
+ for _, policy := range sortedPolicies {
+ if policy.Spec.Kibana.Config == nil {
+ continue
+ }
+ if mergedConfig == nil {
+ mergedConfig = policy.Spec.Kibana.Config.DeepCopy()
+ continue
+ }
+ for key, value := range policy.Spec.Kibana.Config.Data {
+ if mergedConfig.Data == nil {
+ // Lazily allocate: DeepCopy of a Config with nil Data keeps it nil, and
+ // assigning into a nil map panics. Allocating only on an actual write keeps
+ // the hash identical to before in every non-panicking case.
+ mergedConfig.Data = map[string]interface{}{}
+ }
+ mergedConfig.Data[key] = value
+ }
+ }
+
+ // Every pod must carry the expected hash for the config to be considered applied.
+ expectedHash := getKibanaConfigHash(mergedConfig)
+ for _, kbPod := range existingKibanaPods {
+ if kbPod.Annotations[commonannotation.KibanaConfigHashAnnotation] != expectedHash {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// setKibanaSecureSettingsFromPolicies collects the Kibana secure settings referenced by all
+// given policies and stores them as a JSON-encoded annotation on the settings Secret.
+// If no policy declares secure settings, the Secret's annotations are left untouched.
+func (r *ReconcileStackConfigPolicy) setKibanaSecureSettingsFromPolicies(settingsSecret *corev1.Secret, policies []policyv1alpha1.StackConfigPolicy) error {
+ var allSecretSources []commonv1.NamespacedSecretSource
+
+ for _, policy := range policies {
+ // SecureSettings field under Kibana in the StackConfigPolicy; each source is
+ // namespaced with the owning policy's namespace.
+ for _, src := range policy.Spec.Kibana.SecureSettings {
+ allSecretSources = append(allSecretSources, commonv1.NamespacedSecretSource{Namespace: policy.GetNamespace(), SecretName: src.SecretName, Entries: src.Entries})
+ }
+ }
+
+ // Nothing to record.
+ if len(allSecretSources) == 0 {
+ return nil
+ }
+
+ bytes, err := json.Marshal(allSecretSources)
+ if err != nil {
+ return err
+ }
+ settingsSecret.Annotations[commonannotation.SecureSettingsSecretsAnnotationName] = string(bytes)
+ return nil
+}
diff --git a/pkg/controller/stackconfigpolicy/weight_conflict_test.go b/pkg/controller/stackconfigpolicy/weight_conflict_test.go
new file mode 100644
index 0000000000..ac5776c1eb
--- /dev/null
+++ b/pkg/controller/stackconfigpolicy/weight_conflict_test.go
@@ -0,0 +1,442 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package stackconfigpolicy
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ commonv1 "github.com/elastic/cloud-on-k8s/v3/pkg/apis/common/v1"
+ policyv1alpha1 "github.com/elastic/cloud-on-k8s/v3/pkg/apis/stackconfigpolicy/v1alpha1"
+ "github.com/elastic/cloud-on-k8s/v3/pkg/controller/common/license"
+ "github.com/elastic/cloud-on-k8s/v3/pkg/controller/common/operator"
+ "github.com/elastic/cloud-on-k8s/v3/pkg/controller/common/watches"
+)
+
+// TestCheckWeightConflicts covers weight-conflict detection between a policy being
+// reconciled and the policies already present in the cluster: equal weights only
+// conflict when both the resource selectors and the namespaces could overlap, and
+// snapshot repositories only conflict on identical repository names.
+func TestCheckWeightConflicts(t *testing.T) {
+ scheme := runtime.NewScheme()
+ require.NoError(t, policyv1alpha1.AddToScheme(scheme))
+
+ tests := []struct {
+ name string
+ currentPolicy *policyv1alpha1.StackConfigPolicy
+ existingPolicies []policyv1alpha1.StackConfigPolicy
+ operatorNamespace string
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "no conflicts - different weights",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 20,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: false,
+ },
+ {
+ name: "conflict - same weight, overlapping selectors",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: true,
+ errorContains: "weight conflict detected",
+ },
+ {
+ name: "no conflict - same weight, disjoint selectors",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "kibana"},
+ },
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: false,
+ },
+ {
+ name: "no conflict - same weight, different namespaces (non-operator)",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "namespace1"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "namespace2"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: false,
+ },
+ {
+ // A policy in the operator namespace can target any namespace, so it can
+ // conflict with a same-weight policy anywhere.
+ name: "conflict - same weight, operator namespace policy",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "elastic-system"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: true,
+ errorContains: "weight conflict detected",
+ },
+ {
+ name: "no conflict - empty selectors but different namespaces",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "namespace1"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{},
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "namespace2"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{},
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: false,
+ },
+ {
+ name: "no conflict - same weight, same selectors, but different snapshot repositories",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
+ SnapshotRepositories: &commonv1.Config{
+ Data: map[string]interface{}{
+ "policy-1-backups": map[string]interface{}{
+ "type": "s3",
+ "settings": map[string]interface{}{
+ "bucket": "policy-1-backups",
+ "region": "us-west-2",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
+ SnapshotRepositories: &commonv1.Config{
+ Data: map[string]interface{}{
+ "policy-2-backups": map[string]interface{}{
+ "type": "s3",
+ "settings": map[string]interface{}{
+ "bucket": "policy-2-backups",
+ "region": "us-east-1",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: false,
+ },
+ {
+ // Both policies define a repository named "shared-backups" with different
+ // settings, which cannot be merged deterministically.
+ name: "conflict - same weight, same selectors, conflicting snapshot repositories",
+ currentPolicy: &policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: "policy1", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
+ SnapshotRepositories: &commonv1.Config{
+ Data: map[string]interface{}{
+ "shared-backups": map[string]interface{}{
+ "type": "s3",
+ "settings": map[string]interface{}{
+ "bucket": "policy-1-backups",
+ "region": "us-west-2",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ existingPolicies: []policyv1alpha1.StackConfigPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "policy2", Namespace: "default"},
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
+ SnapshotRepositories: &commonv1.Config{
+ Data: map[string]interface{}{
+ "shared-backups": map[string]interface{}{
+ "type": "s3",
+ "settings": map[string]interface{}{
+ "bucket": "policy-2-backups",
+ "region": "us-east-1",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ operatorNamespace: "elastic-system",
+ expectError: true,
+ errorContains: "weight conflict detected",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Seed the fake client with the pre-existing policies.
+ objs := make([]client.Object, len(tt.existingPolicies))
+ for i := range tt.existingPolicies {
+ objs[i] = &tt.existingPolicies[i]
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ Build()
+
+ r := &ReconcileStackConfigPolicy{
+ Client: fakeClient,
+ params: operator.Parameters{
+ OperatorNamespace: tt.operatorNamespace,
+ },
+ recorder: record.NewFakeRecorder(10),
+ licenseChecker: &license.MockLicenseChecker{EnterpriseEnabled: true},
+ dynamicWatches: watches.NewDynamicWatches(),
+ }
+
+ err := r.checkWeightConflicts(context.Background(), tt.currentPolicy)
+
+ if tt.expectError {
+ assert.Error(t, err)
+ if tt.errorContains != "" {
+ assert.Contains(t, err.Error(), tt.errorContains)
+ }
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+// TestSelectorsCouldOverlap exercises the pure label-selector overlap check with
+// representative selector combinations: empty selectors match everything, and two
+// selectors can only both match a resource if their label requirements are compatible.
+func TestSelectorsCouldOverlap(t *testing.T) {
+ r := &ReconcileStackConfigPolicy{}
+
+ tests := []struct {
+ name string
+ selector1 metav1.LabelSelector
+ selector2 metav1.LabelSelector
+ expected bool
+ }{
+ {
+ name: "both empty selectors",
+ selector1: metav1.LabelSelector{},
+ selector2: metav1.LabelSelector{},
+ expected: true,
+ },
+ {
+ name: "one empty selector",
+ selector1: metav1.LabelSelector{},
+ selector2: metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}},
+ expected: true,
+ },
+ {
+ name: "same labels",
+ selector1: metav1.LabelSelector{MatchLabels: map[string]string{"app": "elasticsearch"}},
+ selector2: metav1.LabelSelector{MatchLabels: map[string]string{"app": "elasticsearch"}},
+ expected: true,
+ },
+ {
+ name: "different labels",
+ selector1: metav1.LabelSelector{MatchLabels: map[string]string{"app": "elasticsearch"}},
+ selector2: metav1.LabelSelector{MatchLabels: map[string]string{"app": "kibana"}},
+ expected: false,
+ },
+ {
+ name: "overlapping labels",
+ selector1: metav1.LabelSelector{MatchLabels: map[string]string{"app": "elasticsearch", "env": "prod"}},
+ selector2: metav1.LabelSelector{MatchLabels: map[string]string{"app": "elasticsearch", "version": "8.0"}},
+ expected: true,
+ },
+ {
+ name: "completely disjoint labels",
+ selector1: metav1.LabelSelector{MatchLabels: map[string]string{"app": "elasticsearch", "env": "prod"}},
+ selector2: metav1.LabelSelector{MatchLabels: map[string]string{"app": "kibana", "env": "test"}},
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ selector1, err := metav1.LabelSelectorAsSelector(&tt.selector1)
+ require.NoError(t, err)
+
+ selector2, err := metav1.LabelSelectorAsSelector(&tt.selector2)
+ require.NoError(t, err)
+
+ result := r.selectorsCouldOverlap(selector1, selector2)
+ assert.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+// TestNamespacesCouldOverlap verifies the namespace overlap rule: identical namespaces
+// always overlap, and a policy in the operator namespace overlaps with every namespace.
+func TestNamespacesCouldOverlap(t *testing.T) {
+ reconciler := &ReconcileStackConfigPolicy{
+ params: operator.Parameters{
+ OperatorNamespace: "elastic-system",
+ },
+ }
+
+ testCases := []struct {
+ name string
+ first string
+ second string
+ want bool
+ }{
+ {name: "same namespace", first: "default", second: "default", want: true},
+ {name: "different non-operator namespaces", first: "namespace1", second: "namespace2", want: false},
+ {name: "one is operator namespace", first: "elastic-system", second: "default", want: true},
+ {name: "other is operator namespace", first: "default", second: "elastic-system", want: true},
+ {name: "both are operator namespace", first: "elastic-system", second: "elastic-system", want: true},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.want, reconciler.namespacesCouldOverlap(tc.first, tc.second))
+ })
+ }
+}
diff --git a/test/e2e/es/stackconfigpolicy_test.go b/test/e2e/es/stackconfigpolicy_test.go
index 01f48ef358..053faa3436 100644
--- a/test/e2e/es/stackconfigpolicy_test.go
+++ b/test/e2e/es/stackconfigpolicy_test.go
@@ -2,8 +2,8 @@
// or more contributor license agreements. Licensed under the Elastic License 2.0;
// you may not use this file except in compliance with the Elastic License 2.0.
//go:build es || e2e

package es
import (
@@ -336,6 +334,227 @@ func TestStackConfigPolicy(t *testing.T) {
test.Sequence(nil, steps, esWithlicense).RunSequential(t)
}
+// TestStackConfigPolicyMultipleWeights tests multiple StackConfigPolicies with different weights.
+// It verifies that the lowest-weight policy wins on conflicting keys, that a same-weight policy
+// with a disjoint selector does not conflict, and that deleting the winning policy causes the
+// next policy's settings to take effect.
+func TestStackConfigPolicyMultipleWeights(t *testing.T) {
+ // only execute this test if we have a test license to work with
+ if test.Ctx().TestLicense == "" {
+ t.SkipNow()
+ }
+
+ // StackConfigPolicy is supported for ES versions with file-based settings feature
+ stackVersion := version.MustParse(test.Ctx().ElasticStackVersion)
+ if !stackVersion.GTE(filesettings.FileBasedSettingsMinPreVersion) {
+ t.SkipNow()
+ }
+
+ es := elasticsearch.NewBuilder("test-es-scp-multi").
+ WithESMasterDataNodes(1, elasticsearch.DefaultResources).
+ WithLabel("app", "elasticsearch")
+
+ namespace := test.Ctx().ManagedNamespace(0)
+
+ // Policy with weight 20 (lower priority) - sets cluster.name
+ lowPriorityPolicy := policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: fmt.Sprintf("low-priority-scp-%s", rand.String(4)),
+ },
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 20,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
+ Config: &commonv1.Config{
+ Data: map[string]interface{}{
+ "cluster.name": "low-priority-cluster",
+ },
+ },
+ ClusterSettings: &commonv1.Config{
+ Data: map[string]interface{}{
+ "indices": map[string]interface{}{
+ "recovery.max_bytes_per_sec": "50mb",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ // Policy with weight 10 (higher priority) - should override cluster.name and settings
+ highPriorityPolicy := policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: fmt.Sprintf("high-priority-scp-%s", rand.String(4)),
+ },
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"},
+ },
+ Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
+ Config: &commonv1.Config{
+ Data: map[string]interface{}{
+ "cluster": map[string]interface{}{
+ "name": "high-priority-cluster",
+ },
+ },
+ },
+ ClusterSettings: &commonv1.Config{
+ Data: map[string]interface{}{
+ "indices": map[string]interface{}{
+ "recovery": map[string]interface{}{
+ "max_bytes_per_sec": "200mb",
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ // Policy with same weight 20 but different selector (should not conflict)
+ nonConflictingPolicy := policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: fmt.Sprintf("non-conflicting-scp-%s", rand.String(4)),
+ },
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 20,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "kibana"}, // Different selector
+ },
+ Elasticsearch: policyv1alpha1.ElasticsearchConfigPolicySpec{
+ Config: &commonv1.Config{
+ Data: map[string]interface{}{
+ "cluster.name": "should-not-apply",
+ },
+ },
+ },
+ },
+ }
+
+ esWithlicense := test.LicenseTestBuilder(es)
+
+ steps := func(k *test.K8sClient) test.StepList {
+ return test.StepList{
+ test.Step{
+ Name: "Create low priority StackConfigPolicy",
+ Test: test.Eventually(func() error {
+ return k.CreateOrUpdate(&lowPriorityPolicy)
+ }),
+ },
+ test.Step{
+ Name: "Create high priority StackConfigPolicy",
+ Test: test.Eventually(func() error {
+ return k.CreateOrUpdate(&highPriorityPolicy)
+ }),
+ },
+ test.Step{
+ Name: "Create non-conflicting StackConfigPolicy",
+ Test: test.Eventually(func() error {
+ return k.CreateOrUpdate(&nonConflictingPolicy)
+ }),
+ },
+ // Weight 10 < 20, so the high priority policy's values must win while both exist.
+ test.Step{
+ Name: "High priority cluster name should be applied",
+ Test: test.Eventually(func() error {
+ esClient, err := elasticsearch.NewElasticsearchClient(es.Elasticsearch, k)
+ if err != nil {
+ return err
+ }
+
+ var apiResponse ClusterInfoResponse
+ if _, _, err = request(esClient, http.MethodGet, "/", nil, &apiResponse); err != nil {
+ return err
+ }
+
+ if apiResponse.ClusterName != "high-priority-cluster" {
+ return fmt.Errorf("expected cluster name 'high-priority-cluster', got '%s'", apiResponse.ClusterName)
+ }
+ return nil
+ }),
+ },
+ test.Step{
+ Name: "High priority cluster settings should be applied",
+ Test: test.Eventually(func() error {
+ esClient, err := elasticsearch.NewElasticsearchClient(es.Elasticsearch, k)
+ if err != nil {
+ return err
+ }
+
+ var settings ClusterSettings
+ _, _, err = request(esClient, http.MethodGet, "/_cluster/settings", nil, &settings)
+ if err != nil {
+ return err
+ }
+
+ if settings.Persistent.Indices.Recovery.MaxBytesPerSec != "200mb" {
+ return fmt.Errorf("expected max_bytes_per_sec '200mb', got '%s'", settings.Persistent.Indices.Recovery.MaxBytesPerSec)
+ }
+ return nil
+ }),
+ },
+ // After the winning policy is removed, the remaining weight-20 policy should apply.
+ test.Step{
+ Name: "Delete high priority policy - low priority should take effect",
+ Test: test.Eventually(func() error {
+ return k.Client.Delete(context.Background(), &highPriorityPolicy)
+ }),
+ },
+ test.Step{
+ Name: "Low priority cluster name should now be applied",
+ Test: test.Eventually(func() error {
+ esClient, err := elasticsearch.NewElasticsearchClient(es.Elasticsearch, k)
+ if err != nil {
+ return err
+ }
+
+ var apiResponse ClusterInfoResponse
+ if _, _, err = request(esClient, http.MethodGet, "/", nil, &apiResponse); err != nil {
+ return err
+ }
+
+ if apiResponse.ClusterName != "low-priority-cluster" {
+ return fmt.Errorf("expected cluster name 'low-priority-cluster', got '%s'", apiResponse.ClusterName)
+ }
+ return nil
+ }),
+ },
+ test.Step{
+ Name: "Low priority cluster settings should now be applied",
+ Test: test.Eventually(func() error {
+ esClient, err := elasticsearch.NewElasticsearchClient(es.Elasticsearch, k)
+ if err != nil {
+ return err
+ }
+
+ var settings ClusterSettings
+ _, _, err = request(esClient, http.MethodGet, "/_cluster/settings", nil, &settings)
+ if err != nil {
+ return err
+ }
+
+ if settings.Persistent.Indices.Recovery.MaxBytesPerSec != "50mb" {
+ return fmt.Errorf("expected max_bytes_per_sec '50mb', got '%s'", settings.Persistent.Indices.Recovery.MaxBytesPerSec)
+ }
+ return nil
+ }),
+ },
+ test.Step{
+ Name: "Clean up remaining policies",
+ Test: test.Eventually(func() error {
+ if err := k.Client.Delete(context.Background(), &lowPriorityPolicy); err != nil {
+ return err
+ }
+ return k.Client.Delete(context.Background(), &nonConflictingPolicy)
+ }),
+ },
+ }
+ }
+
+ test.Sequence(nil, steps, esWithlicense).RunSequential(t)
+}
+
func checkAPIStatusCode(esClient client.Client, url string, expectedStatusCode int) error {
var items map[string]interface{}
_, actualStatusCode, _ := request(esClient, http.MethodGet, url, nil, &items)
diff --git a/test/e2e/kb/stackconfigpolicy_test.go b/test/e2e/kb/stackconfigpolicy_test.go
index a8a2644d84..f64666372b 100644
--- a/test/e2e/kb/stackconfigpolicy_test.go
+++ b/test/e2e/kb/stackconfigpolicy_test.go
@@ -125,3 +125,192 @@ func TestStackConfigPolicyKibana(t *testing.T) {
test.Sequence(nil, steps, esWithlicense, kbBuilder).RunSequential(t)
}
+
+// TestStackConfigPolicyKibanaMultipleWeights tests multiple StackConfigPolicies with different weights for Kibana.
+// It verifies that the lowest-weight policy's config and secure settings win while both policies
+// exist, and that deleting the winning policy lets the remaining policy's settings take effect.
+func TestStackConfigPolicyKibanaMultipleWeights(t *testing.T) {
+ // only execute this test if we have a test license to work with
+ if test.Ctx().TestLicense == "" {
+ t.SkipNow()
+ }
+
+ namespace := test.Ctx().ManagedNamespace(0)
+ // set up a 1-node Kibana deployment
+ name := "test-kb-scp-multi"
+ esBuilder := elasticsearch.NewBuilder(name).
+ WithESMasterDataNodes(1, elasticsearch.DefaultResources)
+ kbBuilder := kibana.NewBuilder(name).
+ WithElasticsearchRef(esBuilder.Ref()).
+ WithNodeCount(1).WithLabel("app", "kibana")
+
+ kbPodListOpts := test.KibanaPodListOptions(kbBuilder.Kibana.Namespace, kbBuilder.Kibana.Name)
+
+ // Policy with weight 20 (lower priority)
+ lowPriorityPolicy := policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: fmt.Sprintf("low-priority-kb-scp-%s", rand.String(4)),
+ },
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 20,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "kibana"},
+ },
+ Kibana: policyv1alpha1.KibanaConfigPolicySpec{
+ Config: &commonv1.Config{
+ Data: map[string]interface{}{
+ "server.customResponseHeaders": map[string]interface{}{
+ "priority": "low",
+ "test-header": "low-priority-value",
+ },
+ "elasticsearch.pingTimeout": "15000",
+ },
+ },
+ SecureSettings: []commonv1.SecretSource{
+ {SecretName: fmt.Sprintf("low-priority-secret-%s", rand.String(4))},
+ },
+ },
+ },
+ }
+
+ // Policy with weight 10 (higher priority) - should override lower priority settings
+ highPriorityPolicy := policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: fmt.Sprintf("high-priority-kb-scp-%s", rand.String(4)),
+ },
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 10,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "kibana"},
+ },
+ Kibana: policyv1alpha1.KibanaConfigPolicySpec{
+ Config: &commonv1.Config{
+ Data: map[string]interface{}{
+ "server.customResponseHeaders": map[string]interface{}{
+ "priority": "high",
+ "test-header": "high-priority-value",
+ },
+ "elasticsearch.pingTimeout": "45000",
+ },
+ },
+ SecureSettings: []commonv1.SecretSource{
+ {SecretName: fmt.Sprintf("high-priority-secret-%s", rand.String(4))},
+ },
+ },
+ },
+ }
+
+ // Policy with same weight 20 but different selector (should not conflict)
+ nonConflictingPolicy := policyv1alpha1.StackConfigPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: fmt.Sprintf("non-conflicting-kb-scp-%s", rand.String(4)),
+ },
+ Spec: policyv1alpha1.StackConfigPolicySpec{
+ Weight: 20,
+ ResourceSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "elasticsearch"}, // Different selector
+ },
+ Kibana: policyv1alpha1.KibanaConfigPolicySpec{
+ Config: &commonv1.Config{
+ Data: map[string]interface{}{
+ "server.customResponseHeaders": map[string]interface{}{
+ "priority": "should-not-apply",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ // Create secure settings secrets
+ lowPrioritySecret := corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: lowPriorityPolicy.Spec.Kibana.SecureSettings[0].SecretName,
+ Namespace: namespace,
+ },
+ Data: map[string][]byte{
+ "elasticsearch.username": []byte("low_priority_user"),
+ },
+ }
+
+ highPrioritySecret := corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: highPriorityPolicy.Spec.Kibana.SecureSettings[0].SecretName,
+ Namespace: namespace,
+ },
+ Data: map[string][]byte{
+ "elasticsearch.username": []byte("high_priority_user"),
+ },
+ }
+
+ esWithlicense := test.LicenseTestBuilder(esBuilder)
+
+ steps := func(k *test.K8sClient) test.StepList {
+ kibanaChecks := kibana.KbChecks{
+ Client: k,
+ }
+ return test.StepList{
+ test.Step{
+ Name: "Create secure settings secrets",
+ Test: test.Eventually(func() error {
+ if err := k.CreateOrUpdate(&lowPrioritySecret); err != nil {
+ return err
+ }
+ return k.CreateOrUpdate(&highPrioritySecret)
+ }),
+ },
+ test.Step{
+ Name: "Create low priority StackConfigPolicy",
+ Test: test.Eventually(func() error {
+ return k.CreateOrUpdate(&lowPriorityPolicy)
+ }),
+ },
+ test.Step{
+ Name: "Create high priority StackConfigPolicy",
+ Test: test.Eventually(func() error {
+ return k.CreateOrUpdate(&highPriorityPolicy)
+ }),
+ },
+ test.Step{
+ Name: "Create non-conflicting StackConfigPolicy",
+ Test: test.Eventually(func() error {
+ return k.CreateOrUpdate(&nonConflictingPolicy)
+ }),
+ },
+ // High priority settings should be applied
+ kibanaChecks.CheckHeaderForKey(kbBuilder, "priority", "high"),
+ kibanaChecks.CheckHeaderForKey(kbBuilder, "test-header", "high-priority-value"),
+ // High priority secure settings should be in keystore
+ test.CheckKeystoreEntries(k, KibanaKeystoreCmd, []string{"elasticsearch.username"}, kbPodListOpts...),
+ test.Step{
+ Name: "Delete high priority policy - low priority should take effect",
+ Test: test.Eventually(func() error {
+ return k.Client.Delete(context.Background(), &highPriorityPolicy)
+ }),
+ },
+ // Low priority settings should now be applied
+ kibanaChecks.CheckHeaderForKey(kbBuilder, "priority", "low"),
+ kibanaChecks.CheckHeaderForKey(kbBuilder, "test-header", "low-priority-value"),
+ // Low priority secure settings should be in keystore
+ test.CheckKeystoreEntries(k, KibanaKeystoreCmd, []string{"elasticsearch.username"}, kbPodListOpts...),
+ test.Step{
+ Name: "Clean up remaining policies and secrets",
+ Test: test.Eventually(func() error {
+ if err := k.Client.Delete(context.Background(), &lowPriorityPolicy); err != nil {
+ return err
+ }
+ if err := k.Client.Delete(context.Background(), &nonConflictingPolicy); err != nil {
+ return err
+ }
+ if err := k.Client.Delete(context.Background(), &lowPrioritySecret); err != nil {
+ return err
+ }
+ return k.Client.Delete(context.Background(), &highPrioritySecret)
+ }),
+ },
+ }
+ }
+
+ test.Sequence(nil, steps, esWithlicense, kbBuilder).RunSequential(t)
+}