@@ -20,7 +20,6 @@ package controllers
 import (
 	"context"
 	"fmt"
-	"time"
 
 	"github.com/pkg/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -43,6 +42,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/remote"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	utilexp "sigs.k8s.io/cluster-api/exp/util"
+	"sigs.k8s.io/cluster-api/internal/util/ssa"
 	"sigs.k8s.io/cluster-api/test/infrastructure/container"
 	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
 	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
@@ -57,8 +57,7 @@ const (
 	// dockerMachinePoolLabel is the label used to identify the DockerMachinePool that is responsible for a Docker container.
 	dockerMachinePoolLabel = "docker.cluster.x-k8s.io/machine-pool"
 
-	// requeueAfter is how long to wait before checking again to see if the DockerMachines are still provisioning or deleting.
-	requeueAfter = 10 * time.Second
+	dockerMachinePoolControllerName = "dockermachinepool-controller"
 )
 
 // DockerMachinePoolReconciler reconciles a DockerMachinePool object.
@@ -71,6 +70,7 @@ type DockerMachinePoolReconciler struct {
 	// WatchFilterValue is the label value used to filter events prior to reconciliation.
 	WatchFilterValue string
 
+	ssaCache        ssa.Cache
 	recorder        record.EventRecorder
 	externalTracker external.ObjectTracker
 }
@@ -140,7 +140,7 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re
 
 	// Handle deleted machines
 	if !dockerMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
-		return r.reconcileDelete(ctx, cluster, machinePool, dockerMachinePool)
+		return ctrl.Result{}, r.reconcileDelete(ctx, cluster, machinePool, dockerMachinePool)
 	}
 
 	// Add finalizer and the InfrastructureMachineKind if they aren't already present, and requeue if either were added.
@@ -194,21 +194,22 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr
 		return errors.Wrap(err, "failed setting up with a controller manager")
 	}
 
-	r.recorder = mgr.GetEventRecorderFor("dockermachinepool-controller")
+	r.recorder = mgr.GetEventRecorderFor(dockerMachinePoolControllerName)
 	r.externalTracker = external.ObjectTracker{
 		Controller: c,
 		Cache:      mgr.GetCache(),
 	}
+	r.ssaCache = ssa.NewCache()
 
 	return nil
 }
 
-func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) (ctrl.Result, error) {
+func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) error {
 	log := ctrl.LoggerFrom(ctx)
 
 	dockerMachineList, err := getDockerMachines(ctx, r.Client, *cluster, *machinePool, *dockerMachinePool)
 	if err != nil {
-		return ctrl.Result{}, err
+		return err
 	}
 
 	if len(dockerMachineList.Items) > 0 {
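Note: this hunk initializes ssaCache but its consumer sits outside the diff context. As a rough sketch of how such a cache is typically threaded into server-side apply in this controller family, assuming the internal util/ssa package's Patch helper and WithCachingProxy option keep their current shape (desiredMachine and currentMachine are hypothetical placeholders, not names from this diff):

// Sketch only: apply a desired object with SSA, letting the caching proxy
// skip the API-server round trip when an identical apply is already known
// to be a no-op.
if err := ssa.Patch(ctx, r.Client, dockerMachinePoolControllerName, desiredMachine,
	ssa.WithCachingProxy{Cache: r.ssaCache, Original: currentMachine}); err != nil {
	return errors.Wrap(err, "failed to apply DockerMachine")
}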
@@ -229,10 +230,9 @@ func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, clust
 		}
 
 		if len(errs) > 0 {
-			return ctrl.Result{}, kerrors.NewAggregate(errs)
+			return kerrors.NewAggregate(errs)
 		}
-
-		return ctrl.Result{RequeueAfter: requeueAfter}, nil
+		return nil
 	}
 
 	// Once there are no DockerMachines left, ensure there are no Docker containers left behind.
@@ -243,21 +243,21 @@ func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, clust
 	// List Docker containers, i.e. external machines in the cluster.
 	externalMachines, err := docker.ListMachinesByCluster(ctx, cluster, labelFilters)
 	if err != nil {
-		return ctrl.Result{}, errors.Wrapf(err, "failed to list all machines in the cluster with label \"%s:%s\"", dockerMachinePoolLabel, dockerMachinePool.Name)
+		return errors.Wrapf(err, "failed to list all machines in the cluster with label \"%s:%s\"", dockerMachinePoolLabel, dockerMachinePool.Name)
 	}
 
 	// Providers should similarly ensure that all infrastructure instances are deleted even if the InfraMachine has not been created yet.
 	for _, externalMachine := range externalMachines {
 		log.Info("Deleting Docker container", "container", externalMachine.Name())
 		if err := externalMachine.Delete(ctx); err != nil {
-			return ctrl.Result{}, errors.Wrapf(err, "failed to delete machine %s", externalMachine.Name())
+			return errors.Wrapf(err, "failed to delete machine %s", externalMachine.Name())
 		}
 	}
 
 	// Once all DockerMachines and Docker containers are deleted, remove the finalizer.
 	controllerutil.RemoveFinalizer(dockerMachinePool, infraexpv1.MachinePoolFinalizer)
 
-	return ctrl.Result{}, nil
+	return nil
 }
 
 func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) (ctrl.Result, error) {
@@ -318,11 +318,7 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 		return ctrl.Result{}, nil
 	}
 
-	dockerMachinePool.Status.Ready = false
-	conditions.MarkFalse(dockerMachinePool, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")
-
-	// if some machine is still provisioning, force reconcile in few seconds to check again infrastructure.
-	return ctrl.Result{RequeueAfter: requeueAfter}, nil
+	return r.updateStatus(ctx, cluster, machinePool, dockerMachinePool, dockerMachineList.Items)
 }
 
 func getDockerMachines(ctx context.Context, c client.Client, cluster clusterv1.Cluster, machinePool expv1.MachinePool, dockerMachinePool infraexpv1.DockerMachinePool) (*infrav1.DockerMachineList, error) {
@@ -380,6 +376,64 @@ func dockerMachineToDockerMachinePool(_ context.Context, o client.Object) []ctrl
 	return nil
 }
 
+// updateStatus updates the Status field for the DockerMachinePool object.
+// It checks the current state of the replicas and updates the Status of the DockerMachinePool accordingly.
+func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool, dockerMachines []infrav1.DockerMachine) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// List the Docker containers. This corresponds to an InfraMachinePool instance for providers.
+	labelFilters := map[string]string{dockerMachinePoolLabel: dockerMachinePool.Name}
+	externalMachines, err := docker.ListMachinesByCluster(ctx, cluster, labelFilters)
+	if err != nil {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to list all external machines in the cluster")
+	}
+
+	externalMachineMap := make(map[string]*docker.Machine)
+	for _, externalMachine := range externalMachines {
+		externalMachineMap[externalMachine.Name()] = externalMachine
+	}
+	// We can reuse getDeletionCandidates to get the list of ready DockerMachines and avoid another API call, even though we aren't deleting them here.
+	_, readyMachines, err := r.getDeletionCandidates(ctx, dockerMachines, externalMachineMap, machinePool, dockerMachinePool)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	readyReplicaCount := len(readyMachines)
+	desiredReplicas := int(*machinePool.Spec.Replicas)
+
+	switch {
+	// We are scaling up
+	case readyReplicaCount < desiredReplicas:
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+	// We are scaling down
+	case readyReplicaCount > desiredReplicas:
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+	default:
+		// Make sure last resize operation is marked as completed.
+		// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
+		// are actually provisioned (vs reporting completed immediately after the last machine object is created). This convention is also used by KCP.
+		if len(dockerMachines) == readyReplicaCount {
+			if conditions.IsFalse(dockerMachinePool, clusterv1.ResizedCondition) {
+				log.Info("All the replicas are ready", "replicas", readyReplicaCount)
+			}
+			conditions.MarkTrue(dockerMachinePool, clusterv1.ResizedCondition)
+		}
+		// This means that there was no error in generating the desired number of machine objects.
+		conditions.MarkTrue(dockerMachinePool, clusterv1.MachinesCreatedCondition)
+	}
+
+	getters := make([]conditions.Getter, 0, len(dockerMachines))
+	for i := range dockerMachines {
+		getters = append(getters, &dockerMachines[i])
+	}
+
+	// Aggregate the operational state of all the machines; while aggregating we are adding the
+	// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
+	conditions.SetAggregate(dockerMachinePool, expv1.ReplicasReadyCondition, getters, conditions.AddSourceRef())
+
+	return ctrl.Result{}, nil
+}
+
 func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infraexpv1.DockerMachinePool) error {
 	conditions.SetSummary(dockerMachinePool,
 		conditions.WithConditions(
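For intuition on the aggregation step above, a minimal hypothetical example using the same util/conditions API (object name invented for illustration; assumes the usual metav1 import): a not-ready DockerMachine surfaces on the pool's ReplicasReady condition together with a source ref, so the offending machine can be identified from the DockerMachinePool object alone.

// Illustrative sketch: one DockerMachine reporting Ready=False.
notReady := &infrav1.DockerMachine{ObjectMeta: metav1.ObjectMeta{Name: "docker-machine-0"}}
conditions.MarkFalse(notReady, clusterv1.ReadyCondition, "WaitingForBootstrapData", clusterv1.ConditionSeverityInfo, "")

// After aggregation, ReplicasReady on the pool is False, carrying the reason
// plus a source ref in the reason@machine/name style noted in the comment above.
conditions.SetAggregate(dockerMachinePool, expv1.ReplicasReadyCondition,
	[]conditions.Getter{notReady}, conditions.AddSourceRef())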