Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
annotations: {}
generation: 1
labels:
app.kubernetes.io/instance: some-name
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
name: some-name-ca-cert
ownerReferences:
- blockOwnerDeletion: true
controller: true
kind: PerconaServerMongoDB
name: some-name
spec:
commonName: some-name-ca
duration: 8760h0m0s
isCA: true
issuerRef:
kind: Issuer
name: some-name-psmdb-ca-issuer
renewBefore: 730h0m0s
secretName: some-name-ca-cert
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
annotations: {}
generation: 1
labels:
app.kubernetes.io/instance: some-name
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
name: some-name-ssl-internal
ownerReferences:
- blockOwnerDeletion: true
controller: true
kind: PerconaServerMongoDB
name: some-name
spec:
commonName: some-name
dnsNames:
- localhost
- some-name-rs0
- some-name-rs0.NAME_SPACE
- some-name-rs0.NAME_SPACE.svc.cluster.local
- '*.some-name-rs0'
- '*.some-name-rs0.NAME_SPACE'
- '*.some-name-rs0.NAME_SPACE.svc.cluster.local'
- some-name-rs0.NAME_SPACE.svc.clusterset.local
- '*.some-name-rs0.NAME_SPACE.svc.clusterset.local'
- some-name-rs0-0.clouddemo.xyz
- some-name-rs0-1.clouddemo.xyz
- some-name-rs0-2.clouddemo.xyz
- '*.NAME_SPACE.svc.clusterset.local'
- some-name-mongos
- some-name-mongos.NAME_SPACE
- some-name-mongos.NAME_SPACE.svc.cluster.local
- '*.some-name-mongos'
- '*.some-name-mongos.NAME_SPACE'
- '*.some-name-mongos.NAME_SPACE.svc.cluster.local'
- some-name-cfg
- some-name-cfg.NAME_SPACE
- some-name-cfg.NAME_SPACE.svc.cluster.local
- '*.some-name-cfg'
- '*.some-name-cfg.NAME_SPACE'
- '*.some-name-cfg.NAME_SPACE.svc.cluster.local'
- some-name-mongos.NAME_SPACE.svc.clusterset.local
- '*.some-name-mongos.NAME_SPACE.svc.clusterset.local'
- some-name-cfg.NAME_SPACE.svc.clusterset.local
- '*.some-name-cfg.NAME_SPACE.svc.clusterset.local'
duration: 2160h0m0s
issuerRef:
kind: Issuer
name: some-name-psmdb-issuer
secretName: some-name-ssl-internal
subject:
organizations:
- PSMDB
56 changes: 56 additions & 0 deletions e2e-tests/split-horizon/compare/certificate_some-name-ssl.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
annotations: {}
generation: 1
labels:
app.kubernetes.io/instance: some-name
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
name: some-name-ssl
ownerReferences:
- blockOwnerDeletion: true
controller: true
kind: PerconaServerMongoDB
name: some-name
spec:
commonName: some-name
dnsNames:
- localhost
- some-name-rs0
- some-name-rs0.NAME_SPACE
- some-name-rs0.NAME_SPACE.svc.cluster.local
- '*.some-name-rs0'
- '*.some-name-rs0.NAME_SPACE'
- '*.some-name-rs0.NAME_SPACE.svc.cluster.local'
- some-name-rs0.NAME_SPACE.svc.clusterset.local
- '*.some-name-rs0.NAME_SPACE.svc.clusterset.local'
- some-name-rs0-0.clouddemo.xyz
- some-name-rs0-1.clouddemo.xyz
- some-name-rs0-2.clouddemo.xyz
- '*.NAME_SPACE.svc.clusterset.local'
- some-name-mongos
- some-name-mongos.NAME_SPACE
- some-name-mongos.NAME_SPACE.svc.cluster.local
- '*.some-name-mongos'
- '*.some-name-mongos.NAME_SPACE'
- '*.some-name-mongos.NAME_SPACE.svc.cluster.local'
- some-name-cfg
- some-name-cfg.NAME_SPACE
- some-name-cfg.NAME_SPACE.svc.cluster.local
- '*.some-name-cfg'
- '*.some-name-cfg.NAME_SPACE'
- '*.some-name-cfg.NAME_SPACE.svc.cluster.local'
- some-name-mongos.NAME_SPACE.svc.clusterset.local
- '*.some-name-mongos.NAME_SPACE.svc.clusterset.local'
- some-name-cfg.NAME_SPACE.svc.clusterset.local
- '*.some-name-cfg.NAME_SPACE.svc.clusterset.local'
duration: 2160h0m0s
issuerRef:
kind: Issuer
name: some-name-psmdb-issuer
secretName: some-name-ssl
subject:
organizations:
- PSMDB
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
annotations: {}
generation: 1
labels:
app.kubernetes.io/instance: some-name
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
name: some-name-psmdb-ca-issuer
ownerReferences:
- blockOwnerDeletion: true
controller: true
kind: PerconaServerMongoDB
name: some-name
spec:
selfSigned: {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
annotations: {}
generation: 1
labels:
app.kubernetes.io/instance: some-name
app.kubernetes.io/managed-by: percona-server-mongodb-operator
app.kubernetes.io/name: percona-server-mongodb
app.kubernetes.io/part-of: percona-server-mongodb
name: some-name-psmdb-issuer
ownerReferences:
- blockOwnerDeletion: true
controller: true
kind: PerconaServerMongoDB
name: some-name
spec:
ca:
secretName: some-name-ca-cert
140 changes: 76 additions & 64 deletions e2e-tests/split-horizon/run
Original file line number Diff line number Diff line change
Expand Up @@ -3,17 +3,17 @@
set -o errexit
set -o xtrace

test_dir=$(realpath $(dirname $0))
. ${test_dir}/../functions
test_dir=$(realpath "$(dirname "$0")")
. "${test_dir}"/../functions

configure_client_hostAliases() {
local hostAliasesJson='[]'

for svc in $(kubectl get svc | awk '{print $3 "|" $1}' | grep -E '^[0-9].*'); do
hostname=$(echo ${svc} | awk -F '|' '{print $2}')
ip=$(echo ${svc} | awk -F '|' '{print $1}')
hostname=$(echo "${svc}" | awk -F '|' '{print $2}')
ip=$(echo "${svc}" | awk -F '|' '{print $1}')
hostAlias="{\"ip\": \"${ip}\", \"hostnames\": [\"${hostname}.clouddemo.xyz\"]}"
hostAliasesJson=$(echo $hostAliasesJson | jq --argjson newAlias "$hostAlias" '. += [$newAlias]')
hostAliasesJson=$(echo "$hostAliasesJson" | jq --argjson newAlias "$hostAlias" '. += [$newAlias]')
done

kubectl_bin patch deployment psmdb-client --type='json' -p="[{'op': 'replace', 'path': '/spec/replicas', 'value': 0}]"
Expand All @@ -22,83 +22,95 @@ configure_client_hostAliases() {

kubectl_bin patch deployment psmdb-client --type='json' -p="[{'op': 'replace', 'path': '/spec/template/spec/hostAliases', 'value': $hostAliasesJson}, {'op': 'replace', 'path': '/spec/replicas', 'value': 1}]"

wait_pod $(kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
wait_pod "$(kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')"
}

create_infra ${namespace}
main() {
create_infra "${namespace}"
deploy_cert_manager

cluster="some-name"
kubectl_bin apply \
-f ${conf_dir}/secrets_with_tls.yml \
-f ${conf_dir}/client_with_tls.yml
cluster="some-name"
kubectl_bin apply \
-f "${conf_dir}"/secrets.yml \
-f "${conf_dir}"/client_with_tls.yml

apply_cluster ${test_dir}/conf/${cluster}-3horizons.yml
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}
apply_cluster "${test_dir}"/conf/${cluster}-3horizons.yml
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}

configure_client_hostAliases
desc 'compare certificates and issuers'
compare_kubectl "certificate/${cluster}-ssl"
compare_kubectl "certificate/${cluster}-ssl-internal"
compare_kubectl "certificate/${cluster}-ca-cert"
compare_kubectl "issuer/$cluster-psmdb-ca-issuer"
compare_kubectl "issuer/$cluster-psmdb-issuer"

sleep 10 # give some time for client pod to be ready
configure_client_hostAliases

run_mongo_tls "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \
"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
mongodb "" "--quiet" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' >${tmp_dir}/horizons-3.json
diff $test_dir/compare/horizons-3.json $tmp_dir/horizons-3.json
sleep 10 # give some time for client pod to be ready

isMaster=$(run_mongo_tls "db.hello().isWritablePrimary" "clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" mongodb "" "--quiet" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' | grep -v certificateNames)
if [ "${isMaster}" != "true" ]; then
echo "mongo client should've redirect the connection to primary"
exit 1
fi
run_mongo_tls "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \
"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
mongodb "" "--quiet" | grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' >"${tmp_dir}"/horizons-3.json
diff "$test_dir"/compare/horizons-3.json "$tmp_dir"/horizons-3.json

# stepping down to ensure we haven't redirected to primary just because primary is pod-0
run_mongo_tls "rs.stepDown()" \
"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
mongodb "" "--quiet"
isMaster=$(run_mongo_tls "db.hello().isWritablePrimary" "clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" mongodb "" "--quiet" | grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' | grep -v certificateNames)
if [ "${isMaster}" != "true" ]; then
echo "mongo client should've redirect the connection to primary"
exit 1
fi

sleep 10 # give some time for re-election
# stepping down to ensure we haven't redirected to primary just because primary is pod-0
run_mongo_tls "rs.stepDown()" \
"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
mongodb "" "--quiet"

isMaster=$(run_mongo_tls "db.hello().isWritablePrimary" "clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" mongodb "" "--quiet" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' | grep -v certificateNames)
if [ "${isMaster}" != "true" ]; then
echo "mongo client should've redirect the connection to primary"
exit 1
fi
sleep 10 # give some time for re-election

desc "scaling up the cluster"
isMaster=$(run_mongo_tls "db.hello().isWritablePrimary" "clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" mongodb "" "--quiet" | grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' | grep -v certificateNames)
if [ "${isMaster}" != "true" ]; then
echo "mongo client should've redirect the connection to primary"
exit 1
fi

apply_cluster ${test_dir}/conf/${cluster}-5horizons.yml
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}
desc "scaling up the cluster"

# scale up and down
kubectl_bin patch psmdb ${cluster} \
--type='json' \
-p='[{"op": "replace", "path": "/spec/replsets/0/size", "value": 5}]'
wait_for_running "${cluster}-rs0" 5
wait_cluster_consistency ${cluster}
apply_cluster "${test_dir}"/conf/${cluster}-5horizons.yml
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}

run_mongo_tls "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \
"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
mongodb "" "--quiet" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' >${tmp_dir}/horizons-5.json
diff $test_dir/compare/horizons-5.json $tmp_dir/horizons-5.json
# scale up and down
kubectl_bin patch psmdb ${cluster} \
--type='json' \
-p='[{"op": "replace", "path": "/spec/replsets/0/size", "value": 5}]'
wait_for_running "${cluster}-rs0" 5
wait_cluster_consistency ${cluster}

desc "scaling down the cluster"
run_mongo_tls "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \
"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
mongodb "" "--quiet" | grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' >"${tmp_dir}"/horizons-5.json
diff "$test_dir"/compare/horizons-5.json "$tmp_dir"/horizons-5.json

kubectl_bin patch psmdb ${cluster} \
--type='json' \
-p='[{"op": "replace", "path": "/spec/replsets/0/size", "value": 3}]'
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}
desc "scaling down the cluster"

run_mongo_tls "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \
"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
mongodb "" "--quiet" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' >${tmp_dir}/horizons.json
diff $test_dir/compare/horizons-3.json $tmp_dir/horizons-3.json
kubectl_bin patch psmdb ${cluster} \
--type='json' \
-p='[{"op": "replace", "path": "/spec/replsets/0/size", "value": 3}]'
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}

desc "remove horizon configuration"
run_mongo_tls "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \
	"clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \
	mongodb "" "--quiet" | grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|does not match the remote host name' >"${tmp_dir}"/horizons.json
# Compare the freshly captured output. Previously this diffed
# "$tmp_dir"/horizons-3.json against itself's stale copy from the earlier
# 3-member check, so a regression after scale-down would go undetected.
diff "$test_dir"/compare/horizons-3.json "$tmp_dir"/horizons.json

apply_cluster ${test_dir}/conf/${cluster}.yml
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}
desc "remove horizon configuration"

destroy ${namespace}
apply_cluster "${test_dir}"/conf/${cluster}.yml
wait_for_running "${cluster}-rs0" 3
wait_cluster_consistency ${cluster}

destroy "${namespace}"
}

main
29 changes: 29 additions & 0 deletions pkg/apis/psmdb/v1/psmdb_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -764,6 +764,35 @@ type ReplsetSpec struct {
PrimaryPreferTagSelector PrimaryPreferTagSelectorSpec `json:"primaryPreferTagSelector,omitempty"`
}

func (r *ReplsetSpec) GetHorizons(withPorts bool) map[string]map[string]string {
horizons := make(map[string]map[string]string)
for podName, m := range r.Horizons {
overrides, ok := r.ReplsetOverrides[podName]
hasOverrides := ok && len(overrides.Horizons) > 0

for h, domain := range m {
if hasOverrides {
if d, ok := overrides.Horizons[h]; ok {
domain = d
}
}

idx := strings.IndexRune(domain, ':')
if withPorts && idx == -1 {
domain = fmt.Sprintf("%s:%d", domain, r.GetPort())
} else if !withPorts && idx != -1 {
domain = domain[:idx]
}

if m, ok := horizons[podName]; !ok || m == nil {
horizons[podName] = make(map[string]string)
}
horizons[podName][h] = domain
}
}
Copy link

Copilot AI Dec 24, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The GetHorizons method only iterates over base Horizons (line 769) and applies overrides to them. This means if a pod has horizons defined exclusively in ReplsetOverrides but not in the base Horizons map, those override-only horizons will be ignored. Consider also iterating over ReplsetOverrides to ensure all horizon configurations are included, even if they don't have a corresponding entry in the base Horizons map.

Suggested change
}
}
for podName, overrides := range r.ReplsetOverrides {
if len(overrides.Horizons) == 0 {
continue
}
// Skip pods that already have horizons from the base spec.
if _, exists := horizons[podName]; exists {
continue
}
horizons[podName] = make(map[string]string, len(overrides.Horizons))
for h, domain := range overrides.Horizons {
if withPorts {
if !strings.Contains(domain, ":") {
domain = fmt.Sprintf("%s:%d", domain, r.GetPort())
}
}
horizons[podName][h] = domain
}
}

Copilot uses AI. Check for mistakes.
return horizons
}

func (r *ReplsetSpec) PodName(cr *PerconaServerMongoDB, idx int) string {
return fmt.Sprintf("%s-%s-%d", cr.Name, r.Name, idx)
}
Expand Down
Loading
Loading