Skip to content

Commit 8513169

Browse files
authored
Merge pull request #11515 from fabriziopandini/drop-retry-when-computing-KCP-conditions
🌱 Drop retry when computing KCP conditions
2 parents df36a1e + 0c9acd4 commit 8513169

File tree

2 files changed

+8
-4
lines changed

2 files changed

+8
-4
lines changed

controlplane/kubeadm/internal/workload_cluster_conditions.go

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,8 +67,11 @@ func (w *Workload) UpdateEtcdConditions(ctx context.Context, controlPlane *Contr
6767
if controlPlane.IsEtcdManaged() {
6868
// Update etcd conditions.
6969
// In case of well known temporary errors + control plane scaling up/down or rolling out, retry a few times.
70-
// Note: this is required because there isn't a watch mechanism on etcd.
71-
maxRetry := 3
70+
// Note: it seems that reducing the number of retries during every reconcile also improves stability,
71+
// thus we stop retrying (we only try once).
72+
// However, we keep the code implementing retry support so we can easily revert this decision in a patch
73+
// release if we need to.
74+
maxRetry := 1
7275
for i := range maxRetry {
7376
retryableError := w.updateManagedEtcdConditions(ctx, controlPlane)
7477
// if we should retry and there is a retry left, wait a bit.

controlplane/kubeadm/internal/workload_cluster_conditions_test.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -154,8 +154,9 @@ func TestUpdateEtcdConditions(t *testing.T) {
154154
callCount = 0
155155
w.UpdateEtcdConditions(ctx, controlPane)
156156
if tt.expectedRetry {
157-
g.Expect(callCount).To(Equal(3))
158-
} else {
157+
// Note: we keep the code implementing retry support so we can easily re-activate it if we need to.
158+
// g.Expect(callCount).To(Equal(3))
159+
// } else {
159160
g.Expect(callCount).To(Equal(1))
160161
}
161162
})

0 commit comments

Comments
 (0)