Commit eba0375

Merge pull request kubernetes-sigs#10952 from chrischdi/pr-machinepool-flake-fix
🌱 test: fix machinepool test to wait for topology controller to set correct number of replicas first
2 parents 608c403 + 5505c78

2 files changed (+12, -10 lines)
test/framework/machinepool_helpers.go
Lines changed: 9 additions & 7 deletions

@@ -232,20 +232,22 @@ func ScaleMachinePoolTopologyAndWait(ctx context.Context, input ScaleMachinePool
 		return patchHelper.Patch(ctx, input.Cluster)
 	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine pool topology %s", mpTopology.Name)
 
-	log.Logf("Waiting for correct number of replicas to exist")
+	log.Logf("Waiting for correct number of replicas to exist and have correct number for .spec.replicas")
 	mpList := &expv1.MachinePoolList{}
-	Eventually(func() error {
-		return input.ClusterProxy.GetClient().List(ctx, mpList,
+	mp := expv1.MachinePool{}
+	Eventually(func(g Gomega) int32 {
+		g.Expect(input.ClusterProxy.GetClient().List(ctx, mpList,
 			client.InNamespace(input.Cluster.Namespace),
 			client.MatchingLabels{
 				clusterv1.ClusterNameLabel:                    input.Cluster.Name,
 				clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
 			},
-		)
-	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachinePools object for Cluster %s", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
+		)).ToNot(HaveOccurred())
+		g.Expect(mpList.Items).To(HaveLen(1))
+		mp = mpList.Items[0]
+		return *mp.Spec.Replicas
+	}, retryableOperationTimeout, retryableOperationInterval).Should(Equal(input.Replicas), "MachinePool replicas for Cluster %s does not match set topology replicas", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
 
-	Expect(mpList.Items).To(HaveLen(1))
-	mp := mpList.Items[0]
 	WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
 		Getter:      input.Getter,
 		MachinePool: &mp,
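The reworked wait relies on a Gomega pattern worth calling out: Eventually is given a function that takes a Gomega argument and returns the observed value, so assertions inside the function only fail that polling attempt, and the returned value is retried until it equals the expected replica count. Below is a minimal, self-contained sketch of that pattern; the atomic counter, the goroutine, and the test name are hypothetical stand-ins for the topology controller and are not part of this commit (the real test polls the MachinePool's .spec.replicas).

package example

import (
	"sync/atomic"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestWaitForReplicas(t *testing.T) {
	g := NewWithT(t)

	// Hypothetical stand-in for a controller that eventually writes the desired replica count.
	var replicas atomic.Int32
	go func() {
		time.Sleep(100 * time.Millisecond)
		replicas.Store(3)
	}()

	// Poll until the returned value matches the expectation.
	g.Eventually(func(g Gomega) int32 {
		current := replicas.Load()
		// An assertion inside the polled function: a failure here only retries
		// the current attempt instead of aborting the whole test.
		g.Expect(current).To(BeNumerically(">=", 0))
		return current
	}, 2*time.Second, 50*time.Millisecond).Should(Equal(int32(3)))
}

Returning the observed value (instead of asserting that a bare List call succeeds) is what makes the test block until the topology controller has actually written the scaled replica count before it starts waiting for nodes, which is the flake this commit addresses.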

test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
Lines changed: 3 additions & 3 deletions

@@ -378,7 +378,7 @@ func dockerMachineToDockerMachinePool(_ context.Context, o client.Object) []ctrl
 }
 
 // updateStatus updates the Status field for the MachinePool object.
-// It checks for the current state of the replicas and updates the Status of the MachineSet.
+// It checks for the current state of the replicas and updates the Status of the MachinePool.
 func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool, dockerMachines []infrav1.DockerMachine) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
 
@@ -405,10 +405,10 @@ func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster
 	switch {
 	// We are scaling up
 	case readyReplicaCount < desiredReplicas:
-		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachineSet to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
 	// We are scaling down
 	case readyReplicaCount > desiredReplicas:
-		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachineSet to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
 	default:
 		// Make sure last resize operation is marked as completed.
 		// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
