Skip to content

Commit 50741ff

Browse files
pooknull and hors authored
K8SPG-454: set ready status when all pg pods are updated (#869)
* K8SPG-454: set ready status when all pg pods are updated https://perconadev.atlassian.net/browse/K8SPG-454 * fix test * fix envtest * remove `.status.postgres.updated` --------- Co-authored-by: Viacheslav Sarzhan <[email protected]>
1 parent d99ad5c commit 50741ff

File tree

2 files changed

+37
-41
lines changed

2 files changed

+37
-41
lines changed

percona/controller/pgcluster/status.go

Lines changed: 33 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -36,34 +36,36 @@ func (r *PGClusterReconciler) getHost(ctx context.Context, cr *v2.PerconaPGClust
3636
return host, nil
3737
}
3838

39-
func (r *PGClusterReconciler) getState(cr *v2.PerconaPGCluster, status *v1beta1.PostgresClusterStatus) v2.AppState {
40-
var size, ready int
41-
for _, is := range status.InstanceSets {
42-
size = size + int(is.Replicas)
43-
ready = ready + int(is.ReadyReplicas)
44-
}
45-
39+
func (r *PGClusterReconciler) getState(cr *v2.PerconaPGCluster, status *v2.PerconaPGClusterStatus, crunchyStatus *v1beta1.PostgresClusterStatus) v2.AppState {
4640
if cr.Spec.Pause != nil && *cr.Spec.Pause {
47-
if ready > 0 {
41+
if status.Postgres.Ready > 0 {
4842
return v2.AppStateStopping
4943
}
5044

5145
return v2.AppStatePaused
5246
}
5347

54-
if status.PGBackRest != nil && status.PGBackRest.RepoHost != nil && !status.PGBackRest.RepoHost.Ready {
48+
if crunchyStatus.PGBackRest != nil && crunchyStatus.PGBackRest.RepoHost != nil && !crunchyStatus.PGBackRest.RepoHost.Ready {
49+
return v2.AppStateInit
50+
}
51+
52+
if status.PGBouncer.Ready != status.PGBouncer.Size {
5553
return v2.AppStateInit
5654
}
5755

58-
if status.Proxy.PGBouncer.ReadyReplicas != status.Proxy.PGBouncer.Replicas {
56+
if status.Postgres.Ready != status.Postgres.Size {
5957
return v2.AppStateInit
6058
}
6159

62-
if ready < size {
60+
var updatedPods int32
61+
for _, is := range crunchyStatus.InstanceSets {
62+
updatedPods += is.UpdatedReplicas
63+
}
64+
if updatedPods != status.Postgres.Size {
6365
return v2.AppStateInit
6466
}
6567

66-
if size == 0 {
68+
if status.Postgres.Size == 0 {
6769
return v2.AppStateInit
6870
}
6971

@@ -76,27 +78,17 @@ func (r *PGClusterReconciler) updateStatus(ctx context.Context, cr *v2.PerconaPG
7678
return errors.Wrap(err, "get app host")
7779
}
7880

79-
pgStatusFromCruncy := func() v2.PostgresStatus {
80-
var size, ready int32
81-
for _, is := range status.InstanceSets {
82-
size = size + is.Replicas
83-
ready = ready + is.ReadyReplicas
84-
}
85-
86-
ss := make([]v2.PostgresInstanceSetStatus, 0, len(status.InstanceSets))
87-
for _, is := range status.InstanceSets {
88-
ss = append(ss, v2.PostgresInstanceSetStatus{
89-
Name: is.Name,
90-
Size: is.Replicas,
91-
Ready: is.ReadyReplicas,
92-
})
93-
}
94-
95-
return v2.PostgresStatus{
96-
Size: size,
97-
Ready: ready,
98-
InstanceSets: ss,
99-
}
81+
var size, ready int32
82+
ss := make([]v2.PostgresInstanceSetStatus, 0, len(status.InstanceSets))
83+
for _, is := range status.InstanceSets {
84+
ss = append(ss, v2.PostgresInstanceSetStatus{
85+
Name: is.Name,
86+
Size: is.Replicas,
87+
Ready: is.ReadyReplicas,
88+
})
89+
90+
size += is.Replicas
91+
ready += is.ReadyReplicas
10092
}
10193

10294
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
@@ -109,15 +101,20 @@ func (r *PGClusterReconciler) updateStatus(ctx context.Context, cr *v2.PerconaPG
109101
}
110102

111103
cluster.Status = v2.PerconaPGClusterStatus{
112-
Postgres: pgStatusFromCruncy(),
104+
Postgres: v2.PostgresStatus{
105+
Size: size,
106+
Ready: ready,
107+
InstanceSets: ss,
108+
},
113109
PGBouncer: v2.PGBouncerStatus{
114110
Size: status.Proxy.PGBouncer.Replicas,
115111
Ready: status.Proxy.PGBouncer.ReadyReplicas,
116112
},
117-
State: r.getState(cr, status),
118-
Host: host,
113+
Host: host,
119114
}
120115

116+
cluster.Status.State = r.getState(cr, &cluster.Status, status)
117+
121118
return r.Client.Status().Update(ctx, cluster)
122119
}); err != nil {
123120
return errors.Wrap(err, "update PerconaPGCluster status")

percona/controller/pgcluster/status_test.go

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,6 @@ var _ = Describe("PG Cluster status", Ordered, func() {
151151

152152
When("PGBackRest RepoHost is not ready", func() {
153153
It("state should be initializing", func() {
154-
155154
updateCrunchyPGClusterStatus(ctx, crNamespacedName, func(pgc *v1beta1.PostgresCluster) {
156155
pgc.Status.PGBackRest = &v1beta1.PGBackRestStatus{
157156
RepoHost: &v1beta1.RepoHostStatus{Ready: false},
@@ -229,6 +228,7 @@ var _ = Describe("PG Cluster status", Ordered, func() {
229228
pgc.Status.PGBackRest.RepoHost.Ready = true
230229
pgc.Status.Proxy.PGBouncer.ReadyReplicas = 1
231230
pgc.Status.InstanceSets[0].ReadyReplicas = 1
231+
pgc.Status.InstanceSets[0].UpdatedReplicas = 1
232232
})
233233

234234
reconcileAndAssertState(ctx, crNamespacedName, cr, v2.AppStateReady)
@@ -300,10 +300,9 @@ var _ = Describe("PG Cluster status", Ordered, func() {
300300
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(pgBouncerSVC), pgBouncerSVC)
301301
return err == nil
302302
}, time.Second*15, time.Millisecond*250).Should(BeTrue())
303-
pgBouncerSVC.Status.LoadBalancer.Ingress =
304-
append(pgBouncerSVC.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{
305-
IP: "22.22.22.22",
306-
})
303+
pgBouncerSVC.Status.LoadBalancer.Ingress = append(pgBouncerSVC.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{
304+
IP: "22.22.22.22",
305+
})
307306
Expect(k8sClient.Status().Update(ctx, pgBouncerSVC)).Should(Succeed())
308307

309308
_, err = reconciler(cr).Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName})

0 commit comments

Comments (0)