Commit 918b7d8

Merge pull request #12788 from sbueringer/pr-remove-ssa-cleanup
🌱 Remove unused CleanUpManagedFieldsForSSAAdoption code
2 parents dd27e08 + 9934c60

File tree: 8 files changed (+15 −556 lines)

controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 0 additions & 7 deletions
@@ -797,13 +797,6 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro
       continue
     }
 
-    // Cleanup managed fields of all Machines.
-    // We do this so that Machines that were created/patched before the controller adopted Server-Side-Apply (SSA)
-    // (< v1.4.0) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and
-    // "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations.
-    if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, r.Client, m, kcpManagerName); err != nil {
-      return errors.Wrapf(err, "failed to update Machine: failed to adjust the managedFields of the Machine %s", klog.KObj(m))
-    }
     // Update Machine to propagate in-place mutable fields from KCP.
     updatedMachine, err := r.updateMachine(ctx, m, controlPlane.KCP, controlPlane.Cluster)
     if err != nil {
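
The comment removed above describes the problem the helper used to work around: a field set by a classic Update-type manager stays co-owned by that manager, so a later SSA apply that omits the field cannot delete it. A minimal, self-contained sketch of that mechanic against a plain controller-runtime client — the ConfigMap object and manager names are illustrative, not code from this repository:

package main

import (
  "context"

  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "sigs.k8s.io/controller-runtime/pkg/client"
)

// demoCoOwnership shows how a label set by a classic Update-type manager
// ("manager") and then applied by an SSA manager becomes co-owned, so a
// subsequent apply that omits the label does NOT remove it.
func demoCoOwnership(ctx context.Context, c client.Client) error {
  cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
    Name: "demo", Namespace: "default",
    Labels: map[string]string{"foo": "bar"},
  }}
  // Classic create: managedFields records an Update operation for "manager".
  if err := c.Create(ctx, cm, client.FieldOwner("manager")); err != nil {
    return err
  }

  // SSA apply with the same label value: "ssa-manager" now co-owns the label
  // together with "manager" (same value, so there is no conflict to force).
  desired := &corev1.ConfigMap{
    TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
    ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default",
      Labels: map[string]string{"foo": "bar"}},
  }
  if err := c.Patch(ctx, desired, client.Apply,
    client.FieldOwner("ssa-manager"), client.ForceOwnership); err != nil {
    return err
  }

  // Applying again without the label leaves it in place, because "manager"
  // still owns it. Dropping that stale ownership entry is what the removed
  // CleanUpManagedFieldsForSSAAdoption helper did for pre-v1.4.0 objects.
  desired2 := &corev1.ConfigMap{
    TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
    ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
  }
  return c.Patch(ctx, desired2, client.Apply,
    client.FieldOwner("ssa-manager"), client.ForceOwnership)
}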

controlplane/kubeadm/internal/controllers/controller_test.go

Lines changed: 8 additions & 9 deletions
@@ -1803,16 +1803,15 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
       InfrastructureRef: *infraMachineRef,
       Version: "v1.25.3",
       FailureDomain: fd,
-      ProviderID: "provider-id",
+      // ProviderID is intentionally not set here, this field is set by the Machine controller.
       Deletion: clusterv1.MachineDeletionSpec{
         NodeDrainTimeoutSeconds: duration5s,
         NodeVolumeDetachTimeoutSeconds: duration5s,
         NodeDeletionTimeoutSeconds: duration5s,
       },
     },
   }
-  // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
-  g.Expect(env.Create(ctx, inPlaceMutatingMachine, client.FieldOwner("manager"))).To(Succeed())
+  g.Expect(env.PatchAndWait(ctx, inPlaceMutatingMachine, client.FieldOwner(kcpManagerName))).To(Succeed())
 
   // Existing machine that is in deleting state
   deletingMachine := &clusterv1.Machine{
@@ -1845,7 +1844,7 @@
       ReadinessGates: mandatoryMachineReadinessGates,
     },
   }
-  g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed())
+  g.Expect(env.PatchAndWait(ctx, deletingMachine, client.FieldOwner(kcpManagerName))).To(Succeed())
   // Delete the machine to put it in the deleting state
   g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed())
   // Wait till the machine is marked for deletion
@@ -2094,13 +2093,13 @@
   g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed())
 
   // Verify ManagedFields
-  g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+  g.Expect(updatedDeletingMachine.ManagedFields).Should(
     ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)),
-    "deleting machine should not contain an entry for SSA manager",
+    "deleting machine should contain an entry for SSA manager",
   )
-  g.Expect(updatedDeletingMachine.ManagedFields).Should(
-    ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)),
-    "in-place mutable machine should still contain an entry for old manager",
+  g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+    ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)),
+    "deleting machine should not contain an entry for old manager",
   )
 
   // Verify the machine labels and annotations are unchanged.
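
The flipped assertions match the new fixture setup: the test Machines are now created via SSA (env.PatchAndWait with the controller's field owner) rather than via a classic Create under the old manager, so the deleting Machine is expected to carry an Apply entry for kcpManagerName and no Update entry for classicManager. The property ssa.MatchManagedFieldsEntry checks boils down to roughly this — a hedged re-implementation for illustration, not the repository's helper:

package main

import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hasManagedFieldsEntry mirrors what the assertions above verify via
// ssa.MatchManagedFieldsEntry: whether managedFields contains an entry
// for a given manager with a given operation (Apply vs. Update).
func hasManagedFieldsEntry(fields []metav1.ManagedFieldsEntry, manager string, op metav1.ManagedFieldsOperationType) bool {
  for _, mf := range fields {
    if mf.Manager == manager && mf.Operation == op {
      return true
    }
  }
  return false
}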

internal/controllers/machinedeployment/machinedeployment_controller.go

Lines changed: 0 additions & 13 deletions
@@ -286,19 +286,6 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope) error {
     }
   }
 
-  // Loop over all MachineSets and cleanup managed fields.
-  // We do this so that MachineSets that were created/patched before (< v1.4.0) the controller adopted
-  // Server-Side-Apply (SSA) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and
-  // "capi-machinedeployment" and then we would not be able to e.g. drop labels and annotations.
-  // Note: We are cleaning up managed fields for all MachineSets, so we're able to remove this code in a few
-  // Cluster API releases. If we do this only for selected MachineSets, we would have to keep this code forever.
-  for idx := range s.machineSets {
-    machineSet := s.machineSets[idx]
-    if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, r.Client, machineSet, machineDeploymentManagerName); err != nil {
-      return errors.Wrapf(err, "failed to clean up managedFields of MachineSet %s", klog.KObj(machineSet))
-    }
-  }
-
   templateExists := s.infrastructureTemplateExists && (!md.Spec.Template.Spec.Bootstrap.ConfigRef.IsDefined() || s.bootstrapTemplateExists)
 
   if ptr.Deref(md.Spec.Paused, false) {
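
The note in the deleted loop explains why removal is now safe: the cleanup ran unconditionally for all MachineSets on every reconcile, so after a few releases every object had been migrated and the code could be dropped wholesale. Conceptually, this kind of SSA-adoption cleanup strips the classic Update-type managedFields entry so the SSA manager becomes the sole field owner. A simplified sketch of that idea — not the removed implementation, which also persisted the change back to the API server:

package main

import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "sigs.k8s.io/controller-runtime/pkg/client"
)

// dropClassicManagerEntry removes the pre-SSA Update-type managedFields
// entry so the SSA field manager becomes the sole owner on the next apply.
// Simplified: the real helper also patched the modified object.
func dropClassicManagerEntry(obj client.Object, classicManager string) {
  kept := make([]metav1.ManagedFieldsEntry, 0, len(obj.GetManagedFields()))
  for _, mf := range obj.GetManagedFields() {
    if mf.Manager == classicManager && mf.Operation == metav1.ManagedFieldsOperationUpdate {
      continue // drop the old owner's entry
    }
    kept = append(kept, mf)
  }
  obj.SetManagedFields(kept)
}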

internal/controllers/machinedeployment/machinedeployment_controller_test.go

Lines changed: 0 additions & 191 deletions
@@ -33,7 +33,6 @@ import (
 
   clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
   "sigs.k8s.io/cluster-api/controllers/external"
-  "sigs.k8s.io/cluster-api/internal/util/ssa"
   "sigs.k8s.io/cluster-api/util"
   v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
   "sigs.k8s.io/cluster-api/util/patch"
@@ -500,196 +499,6 @@ func TestMachineDeploymentReconciler(t *testing.T) {
   })
 }
 
-func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testing.T) {
-  setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) {
-    t.Helper()
-
-    t.Log("Creating the namespace")
-    ns, err := env.CreateNamespace(ctx, machineDeploymentNamespace)
-    g.Expect(err).ToNot(HaveOccurred())
-
-    t.Log("Creating the Cluster")
-    cluster := &clusterv1.Cluster{
-      ObjectMeta: metav1.ObjectMeta{
-        Namespace: ns.Name,
-        Name: "test-cluster",
-      },
-      Spec: clusterv1.ClusterSpec{
-        ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
-          APIGroup: builder.ControlPlaneGroupVersion.Group,
-          Kind: builder.GenericControlPlaneKind,
-          Name: "cp1",
-        },
-      },
-    }
-    g.Expect(env.Create(ctx, cluster)).To(Succeed())
-
-    t.Log("Creating the Cluster Kubeconfig Secret")
-    g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
-
-    return ns, cluster
-  }
-
-  teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace, cluster *clusterv1.Cluster) {
-    t.Helper()
-
-    t.Log("Deleting the Cluster")
-    g.Expect(env.Delete(ctx, cluster)).To(Succeed())
-    t.Log("Deleting the namespace")
-    g.Expect(env.Delete(ctx, ns)).To(Succeed())
-  }
-
-  g := NewWithT(t)
-  namespace, testCluster := setup(t, g)
-  defer teardown(t, g, namespace, testCluster)
-
-  labels := map[string]string{
-    "foo": "bar",
-    clusterv1.ClusterNameLabel: testCluster.Name,
-  }
-  version := "v1.10.3"
-  deployment := &clusterv1.MachineDeployment{
-    ObjectMeta: metav1.ObjectMeta{
-      GenerateName: "md-",
-      Namespace: namespace.Name,
-      Labels: map[string]string{
-        clusterv1.ClusterNameLabel: testCluster.Name,
-      },
-    },
-    Spec: clusterv1.MachineDeploymentSpec{
-      Paused: ptr.To(true), // Set this to true as we do not want to test the other parts of the reconciler in this test.
-      ClusterName: testCluster.Name,
-      Replicas: ptr.To[int32](2),
-      Selector: metav1.LabelSelector{
-        // We're using the same labels for spec.selector and spec.template.labels.
-        MatchLabels: labels,
-      },
-      Rollout: clusterv1.MachineDeploymentRolloutSpec{
-        Strategy: clusterv1.MachineDeploymentRolloutStrategy{
-          Type: clusterv1.RollingUpdateMachineDeploymentStrategyType,
-          RollingUpdate: clusterv1.MachineDeploymentRolloutStrategyRollingUpdate{
-            MaxUnavailable: intOrStrPtr(0),
-            MaxSurge: intOrStrPtr(1),
-          },
-        },
-      },
-      Deletion: clusterv1.MachineDeploymentDeletionSpec{
-        Order: clusterv1.OldestMachineSetDeletionOrder,
-      },
-      Template: clusterv1.MachineTemplateSpec{
-        ObjectMeta: clusterv1.ObjectMeta{
-          Labels: labels,
-        },
-        Spec: clusterv1.MachineSpec{
-          ClusterName: testCluster.Name,
-          Version: version,
-          InfrastructureRef: clusterv1.ContractVersionedObjectReference{
-            APIGroup: clusterv1.GroupVersionInfrastructure.Group,
-            Kind: "GenericInfrastructureMachineTemplate",
-            Name: "md-template",
-          },
-          Bootstrap: clusterv1.Bootstrap{
-            DataSecretName: ptr.To("data-secret-name"),
-          },
-        },
-      },
-    },
-  }
-  msListOpts := []client.ListOption{
-    client.InNamespace(namespace.Name),
-    client.MatchingLabels(labels),
-  }
-
-  // Create infrastructure template resource.
-  infraResource := map[string]interface{}{
-    "kind": "GenericInfrastructureMachine",
-    "apiVersion": clusterv1.GroupVersionInfrastructure.String(),
-    "metadata": map[string]interface{}{},
-    "spec": map[string]interface{}{
-      "size": "3xlarge",
-    },
-  }
-  infraTmpl := &unstructured.Unstructured{
-    Object: map[string]interface{}{
-      "kind": "GenericInfrastructureMachineTemplate",
-      "apiVersion": clusterv1.GroupVersionInfrastructure.String(),
-      "metadata": map[string]interface{}{
-        "name": "md-template",
-        "namespace": namespace.Name,
-      },
-      "spec": map[string]interface{}{
-        "template": infraResource,
-      },
-    },
-  }
-  t.Log("Creating the infrastructure template")
-  g.Expect(env.Create(ctx, infraTmpl)).To(Succeed())
-
-  // Create the MachineDeployment object and expect Reconcile to be called.
-  t.Log("Creating the MachineDeployment")
-  g.Expect(env.Create(ctx, deployment)).To(Succeed())
-
-  // Create a MachineSet for the MachineDeployment.
-  classicManagerMS := &clusterv1.MachineSet{
-    TypeMeta: metav1.TypeMeta{
-      Kind: "MachineSet",
-      APIVersion: clusterv1.GroupVersion.String(),
-    },
-    ObjectMeta: metav1.ObjectMeta{
-      Name: deployment.Name + "-" + "classic-ms",
-      Namespace: testCluster.Namespace,
-      Labels: labels,
-    },
-    Spec: clusterv1.MachineSetSpec{
-      ClusterName: testCluster.Name,
-      Replicas: ptr.To[int32](0),
-      Selector: metav1.LabelSelector{
-        MatchLabels: labels,
-      },
-      Template: clusterv1.MachineTemplateSpec{
-        ObjectMeta: clusterv1.ObjectMeta{
-          Labels: labels,
-        },
-        Spec: clusterv1.MachineSpec{
-          ClusterName: testCluster.Name,
-          InfrastructureRef: clusterv1.ContractVersionedObjectReference{
-            APIGroup: clusterv1.GroupVersionInfrastructure.Group,
-            Kind: "GenericInfrastructureMachineTemplate",
-            Name: "md-template",
-          },
-          Bootstrap: clusterv1.Bootstrap{
-            DataSecretName: ptr.To("data-secret-name"),
-          },
-          Version: version,
-        },
-      },
-    },
-  }
-  ssaManagerMS := classicManagerMS.DeepCopy()
-  ssaManagerMS.Name = deployment.Name + "-" + "ssa-ms"
-
-  // Create one using the "old manager".
-  g.Expect(env.Create(ctx, classicManagerMS, client.FieldOwner("manager"))).To(Succeed())
-
-  // Create one using SSA.
-  g.Expect(env.Patch(ctx, ssaManagerMS, client.Apply, client.FieldOwner(machineDeploymentManagerName), client.ForceOwnership)).To(Succeed())
-
-  // Verify that for both the MachineSets the ManagedFields are updated.
-  g.Eventually(func(g Gomega) {
-    machineSets := &clusterv1.MachineSetList{}
-    g.Expect(env.List(ctx, machineSets, msListOpts...)).To(Succeed())
-
-    g.Expect(machineSets.Items).To(HaveLen(2))
-    for _, ms := range machineSets.Items {
-      // Verify the ManagedFields are updated.
-      g.Expect(ms.GetManagedFields()).Should(
-        ContainElement(ssa.MatchManagedFieldsEntry(machineDeploymentManagerName, metav1.ManagedFieldsOperationApply)))
-      g.Expect(ms.GetManagedFields()).ShouldNot(
-        ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)))
-    }
-  }).Should(Succeed())
-}
-
 func TestMachineSetToDeployments(t *testing.T) {
   g := NewWithT(t)
 
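
The deleted test exercised the two creation paths side by side: env.Create with client.FieldOwner("manager") records an Update-type managedFields entry, while an SSA apply via env.Patch(..., client.Apply, ...) records an Apply-type entry, and the cleanup was expected to converge both MachineSets to a single Apply owner. When debugging similar ownership questions, the entries can simply be dumped — generic client-go types, not a repository helper:

package main

import (
  "fmt"

  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// printFieldOwners lists each managedFields entry, which is useful for
// seeing which manager (Apply or Update) owns an object's fields.
func printFieldOwners(obj metav1.Object) {
  for _, mf := range obj.GetManagedFields() {
    fmt.Printf("manager=%q operation=%s apiVersion=%s\n",
      mf.Manager, mf.Operation, mf.APIVersion)
  }
}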

internal/controllers/machineset/machineset_controller.go

Lines changed: 0 additions & 8 deletions
@@ -544,14 +544,6 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e
     }
   }
 
-  // Cleanup managed fields of all Machines.
-  // We do this so that Machines that were created/patched before the controller adopted Server-Side-Apply (SSA)
-  // (< v1.4.0) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and
-  // "capi-machineset" and then we would not be able to e.g. drop labels and annotations.
-  if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, r.Client, m, machineSetManagerName); err != nil {
-    return ctrl.Result{}, errors.Wrapf(err, "failed to update machine: failed to adjust the managedFields of the Machine %q", m.Name)
-  }
-
   // Update Machine to propagate in-place mutable fields from the MachineSet.
   updatedMachine, err := r.computeDesiredMachine(machineSet, m)
   if err != nil {
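
Each of the three controllers performed the same cleanup under its own field-manager name. Judging from the removed comments, the constants referenced in these diffs resolve to the following values — an inference from the comment text, not verified against the source:

// Field-manager names as given in the removed comments; the constant
// identifiers match the ones used in the diffs above (inferred values).
const (
  kcpManagerName               = "capi-kubeadmcontrolplane"
  machineDeploymentManagerName = "capi-machinedeployment"
  machineSetManagerName        = "capi-machineset"
)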

internal/controllers/machineset/machineset_controller_test.go

Lines changed: 7 additions & 9 deletions
@@ -1244,7 +1244,6 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) {
       Kind: "Machine",
     },
     ObjectMeta: metav1.ObjectMeta{
-      UID: "abc-123-uid",
       Name: "in-place-mutating-machine",
       Namespace: namespace.Name,
       Labels: map[string]string{
@@ -1274,15 +1273,14 @@
       },
     },
   }
-  g.Expect(env.Create(ctx, inPlaceMutatingMachine, client.FieldOwner(classicManager))).To(Succeed())
+  g.Expect(env.PatchAndWait(ctx, inPlaceMutatingMachine, client.FieldOwner(machineSetManagerName))).To(Succeed())
 
   deletingMachine := &clusterv1.Machine{
     TypeMeta: metav1.TypeMeta{
       APIVersion: clusterv1.GroupVersion.String(),
       Kind: "Machine",
     },
     ObjectMeta: metav1.ObjectMeta{
-      UID: "abc-123-uid",
       Name: "deleting-machine",
       Namespace: namespace.Name,
       Labels: map[string]string{},
@@ -1301,7 +1299,7 @@
       },
     },
   }
-  g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed())
+  g.Expect(env.PatchAndWait(ctx, deletingMachine, client.FieldOwner(machineSetManagerName))).To(Succeed())
   // Delete the machine to put it in the deleting state
   g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed())
   // Wait till the machine is marked for deletion
@@ -1476,13 +1474,13 @@
   g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed())
 
   // Verify ManagedFields
-  g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+  g.Expect(updatedDeletingMachine.ManagedFields).Should(
     ContainElement(ssa.MatchManagedFieldsEntry(machineSetManagerName, metav1.ManagedFieldsOperationApply)),
-    "deleting machine should not contain an entry for SSA manager",
+    "deleting machine should contain an entry for SSA manager",
   )
-  g.Expect(updatedDeletingMachine.ManagedFields).Should(
-    ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)),
-    "in-place mutable machine should still contain an entry for old manager",
+  g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
+    ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)),
+    "deleting machine should not contain an entry for old manager",
   )
 
   // Verify in-place mutable fields are still the same.
