
Commit 3208ec5

tjmoore4 authored and andrewlecuyer committed
Updates to pgcluster annotations for current primary status and podAntiAffinity key update.

Previously, the 'current primary' name of a particular Postgres cluster managed by the Postgres Operator was stored in various locations and, depending on the version, updated inconsistently. This can result in a lack of clarity as to which pod is currently acting in the primary/master role should the cluster be upgraded in the future. Beyond the obvious confusion this may cause, it is essential for the upcoming pgcluster upgrade process to properly identify the correct primary pod after a failover, restore or similar event. This update seeks to improve upon the current identification method.

For example, after a failover, pgcluster 'mycluster' may have a primary pod, deployment and PVC named 'mycluster-abcd'. Should this pgcluster be recreated, as happens with the upcoming upgrade procedure, we would want to ensure the correct PVC is utilized. To facilitate this, a new annotation, 'current-primary', has been added to the pgcluster CRD. This value will be updated, along with the 'deployment-name' label, 'primary-deployment' annotation and 'PrimaryStorage' name in the relevant CRD, during a failover or cluster restore. This allows a recreated cluster to point to the PVC of the previous replica that was promoted to become the current primary.

The pgcluster spec is also updated to use the JSON key "podAntiAffinity" instead of "podPodAntiAffinity", which both corrects a confusing naming convention and reflects the underlying type change between this and previous Operator versions.
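For illustration only: the pod controller diff below calls a helper, util.CurrentPrimaryUpdate, whose implementation is not shown in this commit. The following is a minimal sketch, assuming a JSON merge patch against the pgcluster custom resource and the older client-go request chain in use at this time; the function body, patch shape and error handling are assumptions, not the actual helper, and the real helper may also update the 'deployment-name' label, 'primary-deployment' annotation and 'PrimaryStorage' name described above.

// Hypothetical sketch of a helper like util.CurrentPrimaryUpdate: patch only the
// 'current-primary' annotation on the pgcluster CRD after a promotion.
package util

import (
	"encoding/json"

	crv1 "github.com/crunchydata/postgres-operator/apis/crunchydata.com/v1"
	"github.com/crunchydata/postgres-operator/config"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/rest"
)

func CurrentPrimaryUpdate(restclient *rest.RESTClient, cluster *crv1.Pgcluster, currentPrimary, namespace string) error {
	// build a merge patch containing only the annotation being changed
	patch, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				config.ANNOTATION_CURRENT_PRIMARY: currentPrimary,
			},
		},
	})
	if err != nil {
		return err
	}

	// apply the merge patch to the pgcluster custom resource
	return restclient.Patch(types.MergePatchType).
		Namespace(namespace).
		Resource(crv1.PgclusterResourcePlural).
		Name(cluster.Spec.ClusterName).
		Body(patch).
		Do().
		Error()
}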
1 parent fd32251 commit 3208ec5

File tree

11 files changed (+225, -34 lines)


apis/crunchydata.com/v1/cluster.go

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@ type PgclusterSpec struct {
 	Status           string              `json:"status"`
 	CustomConfig     string              `json:"customconfig"`
 	UserLabels       map[string]string   `json:"userlabels"`
-	PodAntiAffinity  PodAntiAffinitySpec `json:"podPodAntiAffinity"`
+	PodAntiAffinity  PodAntiAffinitySpec `json:"podAntiAffinity"`
 	SyncReplication  *bool               `json:"syncReplication"`
 	BackrestS3Bucket string              `json:"backrestS3Bucket"`
 	BackrestS3Region string              `json:"backrestS3Region"`

apiserver/clusterservice/clusterimpl.go

Lines changed: 12 additions & 3 deletions
@@ -1270,6 +1270,14 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel
 		spec.BackrestS3Region = request.BackrestS3Region
 	}
 
+	// create a map for the annotations
+	annotations := map[string]string{}
+	// store the default current primary value as an annotation
+	annotations[config.ANNOTATION_CURRENT_PRIMARY] = spec.Name
+	// store the initial deployment value, which will match the
+	// cluster name initially
+	annotations[config.ANNOTATION_PRIMARY_DEPLOYMENT] = spec.Name
+
 	labels := make(map[string]string)
 	labels[config.LABEL_NAME] = name
 	if !request.AutofailFlag || apiserver.Pgo.Cluster.DisableAutofail {
@@ -1283,7 +1291,7 @@
 	// set the pgBackRest repository path
 	spec.BackrestRepoPath = request.BackrestRepoPath
 
-	//pgbadger - set with global flag first then check for a user flag
+	// pgbadger - set with global flag first then check for a user flag
 	labels[config.LABEL_BADGER] = strconv.FormatBool(apiserver.BadgerFlag)
 	if request.BadgerFlag {
 		labels[config.LABEL_BADGER] = "true"
@@ -1295,8 +1303,9 @@
 
 	newInstance := &crv1.Pgcluster{
 		ObjectMeta: meta_v1.ObjectMeta{
-			Name:   name,
-			Labels: labels,
+			Name:        name,
+			Labels:      labels,
+			Annotations: annotations,
 		},
 		Spec: spec,
 		Status: crv1.PgclusterStatus{

apiservermsgs/common.go

Lines changed: 4 additions & 0 deletions
@@ -21,6 +21,10 @@ const PGO_VERSION = "4.3.0"
 const Ok = "ok"
 const Error = "error"
 
+// UpgradeError is the error used for when a command is tried against a cluster that has not
+// been upgraded to the current Operator version
+const UpgradeError = " has not yet been upgraded. Please upgrade the cluster before running this Postgres Operator command."
+
 // Status ...
 // swagger:model Status
 type Status struct {
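As a quick illustration of how the new UpgradeError constant is meant to be consumed, here is a hedged sketch of an API-server guard; the helper name, the Status field names, and the is-upgraded check are assumptions rather than code from this commit.

package clusterservice

import (
	crv1 "github.com/crunchydata/postgres-operator/apis/crunchydata.com/v1"
	msgs "github.com/crunchydata/postgres-operator/apiservermsgs"
	"github.com/crunchydata/postgres-operator/config"
)

// upgradeGuard is a hypothetical helper: if the pgcluster is still marked as not
// upgraded, return an error status whose message is the cluster name followed by
// the shared UpgradeError text; otherwise report Ok.
func upgradeGuard(cluster *crv1.Pgcluster) msgs.Status {
	if cluster.Annotations[config.ANNOTATION_IS_UPGRADED] == config.ANNOTATIONS_FALSE {
		return msgs.Status{Code: msgs.Error, Msg: cluster.Name + msgs.UpgradeError}
	}
	return msgs.Status{Code: msgs.Ok}
}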

config/annotations.go

Lines changed: 8 additions & 0 deletions
@@ -24,4 +24,12 @@ const (
 	ANNOTATION_CLONE_SOURCE_CLUSTER_NAME = "clone-source-cluster-name"
 	ANNOTATION_CLONE_TARGET_CLUSTER_NAME = "clone-target-cluster-name"
 	ANNOTATION_PRIMARY_DEPLOYMENT        = "primary-deployment"
+	// annotation to track the cluster's current primary
+	ANNOTATION_CURRENT_PRIMARY = "current-primary"
+	// annotation to indicate whether a cluster has been upgraded
+	ANNOTATION_IS_UPGRADED = "is-upgraded"
+	// annotation to store the Operator versions upgraded from and to
+	ANNOTATION_UPGRADE_INFO = "upgrade-info"
+	// annotation to store the string boolean, used when checking upgrade status
+	ANNOTATIONS_FALSE = "false"
 )

controller/pod/podcontroller.go

Lines changed: 20 additions & 0 deletions
@@ -21,6 +21,7 @@ import (
 	crv1 "github.com/crunchydata/postgres-operator/apis/crunchydata.com/v1"
 	"github.com/crunchydata/postgres-operator/config"
 	"github.com/crunchydata/postgres-operator/kubeapi"
+	"github.com/crunchydata/postgres-operator/util"
 
 	log "github.com/sirupsen/logrus"
 	apiv1 "k8s.io/api/core/v1"
@@ -117,6 +118,10 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) {
 	if isPromotedPostgresPod(oldPod, newPod) {
 		log.Debugf("Pod Controller: pod %s in namespace %s promoted, calling pod promotion "+
 			"handler", newPod.Name, newPod.Namespace)
+
+		// update the pgcluster's current primary information to match the promotion
+		setCurrentPrimary(c.PodClient, newPod, &cluster)
+
 		if err := c.handlePostgresPodPromotion(newPod, cluster); err != nil {
 			log.Error(err)
 			return
@@ -126,6 +131,7 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) {
 	if isPromotedStandby(oldPod, newPod) {
 		log.Debugf("Pod Controller: standby pod %s in namespace %s promoted, calling standby pod "+
 			"promotion handler", newPod.Name, newPod.Namespace)
+
 		if err := c.handleStandbyPromotion(newPod, cluster); err != nil {
 			log.Error(err)
 			return
@@ -145,6 +151,20 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) {
 	return
 }
 
+// setCurrentPrimary checks whether the newly promoted primary value differs from the pgcluster's
+// current primary value. If different, patch the CRD's annotation to match the new value
+func setCurrentPrimary(restclient *rest.RESTClient, newPod *apiv1.Pod, cluster *crv1.Pgcluster) {
+	// if a failover has occurred and the current primary has changed, update the pgcluster CRD's annotation accordingly
+	if cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY] != newPod.ObjectMeta.Labels[config.LABEL_DEPLOYMENT_NAME] {
+		err := util.CurrentPrimaryUpdate(restclient, cluster, newPod.ObjectMeta.Labels[config.LABEL_DEPLOYMENT_NAME], newPod.Namespace)
+		if err != nil {
+			log.Errorf("PodController unable to patch pgcluster %s with currentprimary value %s Error: %s", cluster.Spec.ClusterName,
+				newPod.ObjectMeta.Labels[config.LABEL_DEPLOYMENT_NAME], err)
+			return
+		}
+	}
+}
+
 // onDelete is called when a pgcluster is deleted
 func (c *Controller) onDelete(obj interface{}) {

kubeapi/service.go

Lines changed: 103 additions & 1 deletion
@@ -16,10 +16,13 @@ package kubeapi
 */
 
 import (
+	"encoding/json"
+
 	log "github.com/sirupsen/logrus"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 )
 
@@ -85,3 +88,102 @@ func UpdateService(clientset *kubernetes.Clientset, svc *v1.Service, namespace s
 	return err
 
 }
+
+// ServicePortPatchSpec holds the relevant information for making a JSON patch to
+// add a port to a service
+type ServicePortPatchSpec struct {
+	Op    string          `json:"op"`
+	Path  string          `json:"path"`
+	Value ServicePortSpec `json:"value"`
+}
+
+// ServicePortSpec holds the specific port info needed when patching a service during a
+// cluster upgrade and is part of the above ServicePortPatchSpec
+type ServicePortSpec struct {
+	Name       string `json:"name"`
+	Port       int    `json:"port"`
+	Protocol   string `json:"protocol"`
+	TargetPort int    `json:"targetPort"`
+}
+
+// PortPatch returns a struct to use when patching in a new port to a service.
+func PortPatch(name, protocol string, port, targetport int) []ServicePortSpec {
+	portPatch := make([]ServicePortSpec, 1)
+	portPatch[0].Name = name
+	portPatch[0].Port = port
+	portPatch[0].Protocol = protocol
+	portPatch[0].TargetPort = targetport
+
+	return portPatch
+}
+
+// ServiceSelectorPatchSpec holds the relevant selector information used when making a JSON patch on a service
+type ServiceSelectorPatchSpec struct {
+	Op    string              `json:"op"`
+	Path  string              `json:"path"`
+	Value ServiceSelectorSpec `json:"value"`
+}
+
+// ServiceSelectorSpec holds the information needed for selecting the appropriate service
+// being used by a pgcluster that we want to patch
+type ServiceSelectorSpec struct {
+	Pgcluster string `json:"pg-cluster"`
+	Role      string `json:"role"`
+}
+
+// SelectorPatches returns the needed selector struct used when patching a pgcluster's service
+func SelectorPatches(servicename, role string) []ServiceSelectorSpec {
+	selectorPatch := make([]ServiceSelectorSpec, 2)
+	selectorPatch[0].Pgcluster = servicename
+	selectorPatch[0].Role = role
+
+	return selectorPatch
+}
+
+// PatchServicePort performs a JSON patch on a service to modify the ports defined for a given service
+// As it performs a JSON patch, the supported operations should include: "add", "remove", "replace", "move", "copy" and "test"
+func PatchServicePort(clientset *kubernetes.Clientset, servicename, namespace, op, jsonpath string, portpatch ServicePortSpec) {
	var patchBytes []byte
	var err error

	patch := make([]ServicePortPatchSpec, 1)
	patch[0].Op = op
	patch[0].Path = jsonpath
	patch[0].Value = portpatch

	patchBytes, err = json.Marshal(patch)
	if err != nil {
		log.Error("error in converting patch " + err.Error())
	} else {
		applyServicePatch(clientset, namespace, servicename, patchBytes)
	}
}
+
+// PatchServiceSelector replaces the selector section of a service definition with the selector patch provided.
+// As it performs a JSON patch, the supported operations should include: "add", "remove", "replace", "move", "copy" and "test"
+func PatchServiceSelector(clientset *kubernetes.Clientset, servicename, namespace, op, jsonpath string, selectorpatch ServiceSelectorSpec) {
	var patchBytes []byte
	var err error

	patch := make([]ServiceSelectorPatchSpec, 1)
	patch[0].Op = op
	patch[0].Path = jsonpath
	patch[0].Value = selectorpatch

	patchBytes, err = json.Marshal(patch)
	if err != nil {
		log.Error("error in converting patch " + err.Error())
	} else {
		// apply the service patch
		applyServicePatch(clientset, namespace, servicename, patchBytes)
	}
}
+
+// applyServicePatch performs a JSON patch with the patch information provided.
+func applyServicePatch(clientset *kubernetes.Clientset, namespace, servicename string, patchBytes []byte) {
	log.Info("patch Service " + servicename)
	if _, err := clientset.CoreV1().Services(namespace).Patch(servicename, types.JSONPatchType, patchBytes); err != nil {
		log.Error(err)
		log.Error("error patching Service " + servicename)
	}
}
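Purely as an illustration of how these new helpers might be called together during an upgrade, here is a short hedged sketch; the service name, namespace, role, port values and JSON paths are assumptions, not code from this commit.

package main

import (
	"github.com/crunchydata/postgres-operator/kubeapi"
	"k8s.io/client-go/kubernetes"
)

// patchClusterService is a hypothetical example combining the helpers above: it points
// the 'mycluster' service selector at the master role, then appends a postgres port entry.
func patchClusterService(clientset *kubernetes.Clientset) {
	// replace the service selector with a pg-cluster/role selector
	selector := kubeapi.SelectorPatches("mycluster", "master")[0]
	kubeapi.PatchServiceSelector(clientset, "mycluster", "pgouser1", "replace", "/spec/selector", selector)

	// add a port definition to the end of the service's port list
	port := kubeapi.PortPatch("postgres", "TCP", 5432, 5432)[0]
	kubeapi.PatchServicePort(clientset, "mycluster", "pgouser1", "add", "/spec/ports/-", port)
}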

operator/backrest/restore.go

Lines changed: 4 additions & 6 deletions
@@ -300,7 +300,7 @@ func createRestoredDeployment(restclient *rest.RESTClient, cluster *crv1.Pgclust
 	cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER] = cluster.Spec.ClusterName
 
 	// Set the Patroni scope to the name of the primary deployment. Replicas will get scope using the
-	// 'current-primary' label on the pgcluster
+	// 'crunchy-pgha-scope' label
 	cluster.Spec.UserLabels[config.LABEL_PGHA_SCOPE] = restoreToName
 
 	archiveMode := "on"
@@ -406,12 +406,10 @@ func createRestoredDeployment(restclient *rest.RESTClient, cluster *crv1.Pgclust
 		return err
 	}
 
-	cluster.Spec.UserLabels[config.LABEL_CURRENT_PRIMARY] = restoreToName
-
+	// store the workflowID in a user label
 	cluster.Spec.UserLabels[crv1.PgtaskWorkflowID] = workflowID
-
-	err = util.PatchClusterCRD(restclient, cluster.Spec.UserLabels, cluster, namespace)
-	if err != nil {
+	// patch the pgcluster CRD with the updated info
+	if err = util.PatchClusterCRD(restclient, cluster.Spec.UserLabels, cluster, restoreToName, namespace); err != nil {
 		log.Error("could not patch primary crv1 with labels")
 		return err
 	}

operator/cluster/cluster.go

Lines changed: 4 additions & 2 deletions
@@ -33,7 +33,7 @@ import (
 
 	log "github.com/sirupsen/logrus"
 	apps_v1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
@@ -71,7 +71,7 @@ func AddClusterBase(clientset *kubernetes.Clientset, client *rest.RESTClient, cl
 	}
 
 	dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
-		clientset, cl, namespace, cl.Spec.Name, cl.Spec.PrimaryStorage)
+		clientset, cl, namespace, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cl.Spec.PrimaryStorage)
 	if err != nil {
 		log.Error(err)
 		publishClusterCreateFailure(cl, err.Error())
@@ -93,7 +93,9 @@ func AddClusterBase(clientset *kubernetes.Clientset, client *rest.RESTClient, cl
 	if err != nil {
 		log.Error("error in status patch " + err.Error())
 	}
+
 	err = util.Patch(client, "/spec/PrimaryStorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgclusterResourcePlural, cl.Spec.Name, namespace)
+
 	if err != nil {
 		log.Error("error in pvcname patch " + err.Error())
 	}

operator/cluster/clusterlogic.go

Lines changed: 25 additions & 13 deletions
@@ -34,6 +34,7 @@ import (
 	"github.com/crunchydata/postgres-operator/util"
 	log "github.com/sirupsen/logrus"
 	v1 "k8s.io/api/apps/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 )
@@ -89,7 +90,15 @@ func addClusterCreateDeployments(clientset *kubernetes.Clientset, client *rest.R
 		}
 	}
 
-	cl.Spec.UserLabels[config.LABEL_DEPLOYMENT_NAME] = cl.Spec.Name
+	// if the current deployment label value does not match current primary name
+	// update the label so that the new deployment will match the existing PVC
+	// as determined previously
+	// Note that the use of this value brings the initial deployment creation in line with
+	// the paradigm used during cluster restoration, as in operator/backrest/restore.go
+	if cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY] != cl.Spec.UserLabels[config.LABEL_DEPLOYMENT_NAME] {
+		cl.Spec.UserLabels[config.LABEL_DEPLOYMENT_NAME] = cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY]
+	}
+
 	cl.Spec.UserLabels[config.LABEL_PGOUSER] = cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
 	cl.Spec.UserLabels[config.LABEL_PG_CLUSTER_IDENTIFIER] = cl.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER]
 
@@ -109,19 +118,19 @@ func addClusterCreateDeployments(clientset *kubernetes.Clientset, client *rest.R
 
 	//create the primary deployment
 	deploymentFields := operator.DeploymentTemplateFields{
-		Name:             cl.Spec.Name,
+		Name:             cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY],
 		IsInit:           true,
 		Replicas:         "0",
 		ClusterName:      cl.Spec.Name,
-		PrimaryHost:      cl.Spec.Name,
+		PrimaryHost:      cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY],
 		Port:             cl.Spec.Port,
 		CCPImagePrefix:   util.GetValueOrDefault(cl.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix),
 		CCPImage:         cl.Spec.CCPImage,
 		CCPImageTag:      cl.Spec.CCPImageTag,
 		PVCName:          dataVolume.InlineVolumeSource(),
 		DeploymentLabels: operator.GetLabelsFromMap(cl.Spec.UserLabels),
 		PodLabels:        operator.GetLabelsFromMap(cl.Spec.UserLabels),
-		DataPathOverride: cl.Spec.Name,
+		DataPathOverride: cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY],
 		Database:         cl.Spec.Database,
 		ArchiveMode:      archiveMode,
 		SecurityContext:  util.GetPodSecurityContext(supplementalGroups),
@@ -134,17 +143,17 @@ func addClusterCreateDeployments(clientset *kubernetes.Clientset, client *rest.R
 		ConfVolume:               operator.GetConfVolume(clientset, cl, namespace),
 		CollectAddon:             operator.GetCollectAddon(clientset, namespace, &cl.Spec),
 		CollectVolume:            operator.GetCollectVolume(clientset, cl, namespace),
-		BadgerAddon:              operator.GetBadgerAddon(clientset, namespace, cl, cl.Spec.Name),
+		BadgerAddon:              operator.GetBadgerAddon(clientset, namespace, cl, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY]),
 		PgmonitorEnvVars:         operator.GetPgmonitorEnvVars(cl.Spec.UserLabels[config.LABEL_COLLECT]),
 		ScopeLabel:               config.LABEL_PGHA_SCOPE,
-		PgbackrestEnvVars:        operator.GetPgbackrestEnvVars(cl, cl.Labels[config.LABEL_BACKREST], cl.Spec.Name,
+		PgbackrestEnvVars:        operator.GetPgbackrestEnvVars(cl, cl.Labels[config.LABEL_BACKREST], cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY],
 			cl.Spec.Port, cl.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE]),
 		PgbackrestS3EnvVars:      operator.GetPgbackrestS3EnvVars(*cl, clientset, namespace),
 		EnableCrunchyadm:         operator.Pgo.Cluster.EnableCrunchyadm,
 		ReplicaReinitOnStartFail: !operator.Pgo.Cluster.DisableReplicaStartFailReinit,
 		SyncReplication:          operator.GetSyncReplication(cl.Spec.SyncReplication),
 		Tablespaces:              operator.GetTablespaceNames(cl.Spec.TablespaceMounts),
-		TablespaceVolumes:        operator.GetTablespaceVolumesJSON(cl.Spec.Name, tablespaceStorageTypeMap),
+		TablespaceVolumes:        operator.GetTablespaceVolumesJSON(cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], tablespaceStorageTypeMap),
 		TablespaceVolumeMounts:   operator.GetTablespaceVolumeMountsJSON(tablespaceStorageTypeMap),
 		TLSEnabled:               cl.Spec.TLS.IsTLSEnabled(),
 		TLSOnly:                  cl.Spec.TLSOnly,
@@ -157,7 +166,10 @@ func addClusterCreateDeployments(clientset *kubernetes.Clientset, client *rest.R
 	// initialization logic should be executed when the postgres-ha container is run. This
 	// ensures that the original primary in a PG cluster does not attempt to run any initialization
 	// logic following a restart of the container.
-	if err = operator.CreatePGHAConfigMap(clientset, cl, namespace); err != nil {
+	// If the configmap already exists, the cluster creation will continue as this is required
+	// for certain pgcluster upgrades.
+	if err = operator.CreatePGHAConfigMap(clientset, cl, namespace); err != nil &&
+		!kerrors.IsAlreadyExists(err) {
 		log.Error(err.Error())
 		return err
 	}
@@ -197,10 +209,10 @@ func addClusterCreateDeployments(clientset *kubernetes.Clientset, client *rest.R
 		log.Info("primary Deployment " + cl.Spec.Name + " in namespace " + namespace + " already existed so not creating it ")
 	}
 
-	cl.Spec.UserLabels[config.LABEL_CURRENT_PRIMARY] = cl.Spec.Name
-
-	err = util.PatchClusterCRD(client, cl.Spec.UserLabels, cl, namespace)
-	if err != nil {
+	// patch in the correct current primary value to the CRD spec, as well as
+	// any updated user labels. This will handle both new and updated clusters.
+	// Note: in previous operator versions, this was stored in a user label
+	if err = util.PatchClusterCRD(client, cl.Spec.UserLabels, cl, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], namespace); err != nil {
 		log.Error("could not patch primary crv1 with labels")
 		return err
 	}
@@ -568,7 +580,7 @@ func ScaleClusterDeployments(clientset *kubernetes.Clientset, cluster crv1.Pgclu
 	// determine if the deployment is a primary, replica, or supporting service (pgBackRest,
 	// pgBouncer, etc.)
 	switch {
-	case deployment.Name == cluster.Labels[config.LABEL_CURRENT_PRIMARY]:
+	case deployment.Name == cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY]:
 		clusterInfo.PrimaryDeployment = deployment.Name
 		// if not scaling the primary simply move on to the next deployment
 		if !scalePrimary {