Skip to content
This repository was archived by the owner on Aug 12, 2025. It is now read-only.

Commit f3ee310

Browse files
committed
feat: add finalizer code
Signed-off-by: Chris Privitere <[email protected]>
1 parent ed3442f commit f3ee310

File tree

3 files changed

+24
-10
lines changed

3 files changed

+24
-10
lines changed

api/v1beta1/packetcluster_types.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,9 @@ import (
2222
)
2323

2424
const (
25+
// ClusterFinalizer allows PacketClusterReconciler to clean up resources associated with PacketCluster before
26+
// removing it from the apiserver.
27+
ClusterFinalizer = "packetcluster.infrastructure.cluster.x-k8s.io"
2528
// NetworkInfrastructureReadyCondition reports of current status of cluster infrastructure.
2629
NetworkInfrastructureReadyCondition clusterv1.ConditionType = "NetworkInfrastructureReady"
2730
)

controllers/packetcluster_controller.go

Lines changed: 14 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ import (
3232
"sigs.k8s.io/controller-runtime/pkg/builder"
3333
"sigs.k8s.io/controller-runtime/pkg/client"
3434
"sigs.k8s.io/controller-runtime/pkg/controller"
35+
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
3536
"sigs.k8s.io/controller-runtime/pkg/handler"
3637

3738
infrav1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
@@ -103,7 +104,14 @@ func (r *PacketClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
103104

104105
// Handle deleted clusters
105106
if !cluster.DeletionTimestamp.IsZero() {
106-
return r.reconcileDelete(ctx, clusterScope)
107+
return ctrl.Result{}, r.reconcileDelete(ctx, clusterScope)
108+
}
109+
110+
// Add finalizer first if not set to avoid the race condition between init and delete.
111+
// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
112+
if !controllerutil.ContainsFinalizer(packetcluster, infrav1.ClusterFinalizer) {
113+
controllerutil.AddFinalizer(packetcluster, infrav1.ClusterFinalizer)
114+
return ctrl.Result{}, nil
107115
}
108116

109117
err = r.reconcileNormal(ctx, clusterScope)
@@ -178,7 +186,7 @@ func (r *PacketClusterReconciler) reconcileNormal(ctx context.Context, clusterSc
178186
return nil
179187
}
180188

181-
func (r *PacketClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ClusterScope) (ctrl.Result, error) {
189+
func (r *PacketClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ClusterScope) error {
182190
log := ctrl.LoggerFrom(ctx).WithValues("cluster", clusterScope.Cluster.Name)
183191
log.Info("Reconciling PacketCluster Deletion")
184192

@@ -190,15 +198,17 @@ func (r *PacketClusterReconciler) reconcileDelete(ctx context.Context, clusterSc
190198
lb := emlb.NewEMLB(r.PacketClient.GetConfig().DefaultHeader["X-Auth-Token"], packetCluster.Spec.ProjectID, packetCluster.Spec.Metro)
191199

192200
if err := lb.DeleteLoadBalancer(ctx, clusterScope); err != nil {
193-
fmt.Println("It's ok!")
201+
return fmt.Errorf("failed to delete load balancer: %w", err)
194202
}
195203
}
196204
// Initially I created this handler to remove an elastic IP when a cluster
197205
// gets deleted, but it does not sound like a good idea. It is better to
198206
// leave to the users the ability to decide if they want to keep and reassign
199207
// the IP or if they do not need it anymore
200208

201-
return ctrl.Result{}, nil
209+
// Cluster is deleted so remove the finalizer.
210+
controllerutil.RemoveFinalizer(packetCluster, infrav1.ClusterFinalizer)
211+
return nil
202212
}
203213

204214
func (r *PacketClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {

controllers/packetmachine_controller.go

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -164,6 +164,13 @@ func (r *PacketMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
164164
}
165165
}()
166166

167+
// Add finalizer first if not set to avoid the race condition between init and delete.
168+
// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
169+
if packetmachine.ObjectMeta.DeletionTimestamp.IsZero() && !controllerutil.ContainsFinalizer(packetmachine, infrav1.MachineFinalizer) {
170+
controllerutil.AddFinalizer(packetmachine, infrav1.MachineFinalizer)
171+
return ctrl.Result{}, nil
172+
}
173+
167174
// Handle deleted machines
168175
if !packetmachine.ObjectMeta.DeletionTimestamp.IsZero() {
169176
err = r.reconcileDelete(ctx, machineScope)
@@ -262,12 +269,6 @@ func (r *PacketMachineReconciler) reconcile(ctx context.Context, machineScope *s
262269
return ctrl.Result{}, nil
263270
}
264271

265-
// If the PacketMachine doesn't have our finalizer, add it.
266-
controllerutil.AddFinalizer(packetmachine, infrav1.MachineFinalizer)
267-
if err := machineScope.PatchObject(ctx); err != nil {
268-
log.Error(err, "unable to patch object")
269-
}
270-
271272
if !machineScope.Cluster.Status.InfrastructureReady {
272273
log.Info("Cluster infrastructure is not ready yet")
273274
conditions.MarkFalse(machineScope.PacketMachine, infrav1.DeviceReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")

0 commit comments

Comments
 (0)