diff --git a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go index 1b8636909f27..cf1d0d5ffcd9 100644 --- a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go +++ b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go @@ -347,6 +347,11 @@ type APIEndpoint struct { BindPort int32 `json:"bindPort,omitempty"` } +// IsDefined returns true if the APIEndpoint is defined. +func (r *APIEndpoint) IsDefined() bool { + return r.AdvertiseAddress != "" || r.BindPort != 0 +} + // NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join". // Note: The NodeRegistrationOptions struct has to be kept in sync with the structs in MarshalJSON. // +kubebuilder:validation:MinProperties=1 diff --git a/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go b/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go index 1f7d6ef4082d..cfc72651b95f 100644 --- a/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go +++ b/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go @@ -547,6 +547,16 @@ type KubeadmConfig struct { Status KubeadmConfigStatus `json:"status,omitempty,omitzero"` } +// IsForJoin returns true if the KubeadmConfig is for a kubeadm join. +func (c *KubeadmConfig) IsForJoin() bool { + return c.Spec.JoinConfiguration.IsDefined() +} + +// IsForInit returns true if the KubeadmConfig is for a kubeadm init. +func (c *KubeadmConfig) IsForInit() bool { + return !c.IsForJoin() +} + // GetV1Beta1Conditions returns the set of conditions for this object. 
func (c *KubeadmConfig) GetV1Beta1Conditions() clusterv1.Conditions { if c.Status.Deprecated == nil || c.Status.Deprecated.V1Beta1 == nil { diff --git a/controlplane/kubeadm/internal/cluster_labels.go b/controlplane/kubeadm/internal/cluster_labels.go deleted file mode 100644 index f0ff9663b9fd..000000000000 --- a/controlplane/kubeadm/internal/cluster_labels.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util/labels/format" -) - -// ControlPlaneMachineLabelsForCluster returns a set of labels to add to a control plane machine for this specific cluster. -func ControlPlaneMachineLabelsForCluster(kcp *controlplanev1.KubeadmControlPlane, clusterName string) map[string]string { - labels := map[string]string{} - - // Add the labels from the MachineTemplate. - // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. - for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Labels { - labels[k] = v - } - - // Always force these labels over the ones coming from the spec. - labels[clusterv1.ClusterNameLabel] = clusterName - labels[clusterv1.MachineControlPlaneLabel] = "" - // Note: MustFormatValue is used here as the label value can be a hash if the control plane name is longer than 63 characters. 
- labels[clusterv1.MachineControlPlaneNameLabel] = format.MustFormatValue(kcp.Name) - return labels -} diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index e599a8622931..aa84b08d4316 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -50,7 +50,7 @@ type ControlPlane struct { Machines collections.Machines machinesPatchHelpers map[string]*patch.Helper - machinesNotUptoDate collections.Machines + MachinesNotUptoDate collections.Machines machinesNotUpToDateResults map[string]NotUpToDateResult // reconciliationTime is the time of the current reconciliation, and should be used for all "now" calculations @@ -119,14 +119,16 @@ func NewControlPlane(ctx context.Context, managementCluster ManagementCluster, c machinesNotUptoDate := make(collections.Machines, len(ownedMachines)) machinesNotUpToDateResults := map[string]NotUpToDateResult{} for _, m := range ownedMachines { - upToDate, notUpToDateResult, err := UpToDate(m, kcp, &reconciliationTime, infraMachines, kubeadmConfigs) + upToDate, notUpToDateResult, err := UpToDate(ctx, client, cluster, m, kcp, &reconciliationTime, infraMachines, kubeadmConfigs) if err != nil { return nil, err } if !upToDate { machinesNotUptoDate.Insert(m) - machinesNotUpToDateResults[m.Name] = *notUpToDateResult } + // Set this even if machine is UpToDate. This is needed to complete triggering in-place updates + // MachinesNotUptoDate should always be used instead to check if a Machine is up-to-date. 
+ machinesNotUpToDateResults[m.Name] = *notUpToDateResult } return &ControlPlane{ @@ -134,7 +136,7 @@ func NewControlPlane(ctx context.Context, managementCluster ManagementCluster, c Cluster: cluster, Machines: ownedMachines, machinesPatchHelpers: patchHelpers, - machinesNotUptoDate: machinesNotUptoDate, + MachinesNotUptoDate: machinesNotUptoDate, machinesNotUpToDateResults: machinesNotUpToDateResults, KubeadmConfigs: kubeadmConfigs, InfraResources: infraMachines, @@ -216,25 +218,6 @@ func getGetFailureDomainIDs(failureDomains []clusterv1.FailureDomain) []string { return ids } -// InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane. -func (c *ControlPlane) InitialControlPlaneConfig() *bootstrapv1.KubeadmConfigSpec { - bootstrapSpec := c.KCP.Spec.KubeadmConfigSpec.DeepCopy() - // Note: When building a KubeadmConfig for the first CP machine empty out the unnecessary JoinConfiguration. - bootstrapSpec.JoinConfiguration = bootstrapv1.JoinConfiguration{} - return bootstrapSpec -} - -// JoinControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for joining control planes. -func (c *ControlPlane) JoinControlPlaneConfig() *bootstrapv1.KubeadmConfigSpec { - bootstrapSpec := c.KCP.Spec.KubeadmConfigSpec.DeepCopy() - // Note: When building a KubeadmConfig for a joining CP machine empty out the unnecessary InitConfiguration. - bootstrapSpec.InitConfiguration = bootstrapv1.InitConfiguration{} - // NOTE: For the joining we are preserving the ClusterConfiguration in order to determine if the - // cluster is using an external etcd in the kubeadm bootstrap provider (even if this is not required by kubeadm Join). - // TODO: Determine if this copy of cluster configuration can be used for rollouts (thus allowing to remove the annotation at machine level) - return bootstrapSpec -} - // HasDeletingMachine returns true if any machine in the control plane is in the process of being deleted. 
func (c *ControlPlane) HasDeletingMachine() bool { return len(c.Machines.Filter(collections.HasDeletionTimestamp)) > 0 @@ -254,19 +237,19 @@ func (c *ControlPlane) GetKubeadmConfig(machineName string) (*bootstrapv1.Kubead // MachinesNeedingRollout return a list of machines that need to be rolled out. func (c *ControlPlane) MachinesNeedingRollout() (collections.Machines, map[string]NotUpToDateResult) { // Note: Machines already deleted are dropped because they will be replaced by new machines after deletion completes. - return c.machinesNotUptoDate.Filter(collections.Not(collections.HasDeletionTimestamp)), c.machinesNotUpToDateResults + return c.MachinesNotUptoDate.Filter(collections.Not(collections.HasDeletionTimestamp)), c.machinesNotUpToDateResults } // NotUpToDateMachines return a list of machines that are not up to date with the control // plane's configuration. func (c *ControlPlane) NotUpToDateMachines() (collections.Machines, map[string]NotUpToDateResult) { - return c.machinesNotUptoDate, c.machinesNotUpToDateResults + return c.MachinesNotUptoDate, c.machinesNotUpToDateResults } // UpToDateMachines returns the machines that are up to date with the control // plane's configuration. func (c *ControlPlane) UpToDateMachines() collections.Machines { - return c.Machines.Difference(c.machinesNotUptoDate) + return c.Machines.Difference(c.MachinesNotUptoDate) } // getInfraMachines fetches the InfraMachine for each machine in the collection and returns a map of machine.Name -> InfraMachine. 
diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index f3f059e6ba01..fd6a63b5d952 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -124,13 +124,13 @@ func TestControlPlane(t *testing.T) { machinesNotUptoDate, machinesNotUpToDateResults := controlPlane.NotUpToDateMachines() g.Expect(machinesNotUptoDate.Names()).To(ConsistOf("m2", "m3")) - g.Expect(machinesNotUpToDateResults).To(HaveLen(2)) + g.Expect(machinesNotUpToDateResults).To(HaveLen(5)) g.Expect(machinesNotUpToDateResults["m2"].ConditionMessages).To(Equal([]string{"Version v1.29.0, v1.31.0 required"})) g.Expect(machinesNotUpToDateResults["m3"].ConditionMessages).To(Equal([]string{"Version v1.29.3, v1.31.0 required"})) machinesNeedingRollout, machinesNotUpToDateResults := controlPlane.MachinesNeedingRollout() g.Expect(machinesNeedingRollout.Names()).To(ConsistOf("m2")) - g.Expect(machinesNotUpToDateResults).To(HaveLen(2)) + g.Expect(machinesNotUpToDateResults).To(HaveLen(5)) g.Expect(machinesNotUpToDateResults["m2"].LogMessages).To(Equal([]string{"Machine version \"v1.29.0\" is not equal to KCP version \"v1.31.0\""})) g.Expect(machinesNotUpToDateResults["m3"].LogMessages).To(Equal([]string{"Machine version \"v1.29.3\" is not equal to KCP version \"v1.31.0\""})) @@ -339,7 +339,7 @@ func TestStatusToLogKeyAndValues(t *testing.T) { c := &ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{}, Machines: collections.FromMachines(healthyMachine, machineWithoutNode, machineJustDeleted, machineNotUpToDate, machineMarkedForRemediation), - machinesNotUptoDate: collections.FromMachines(machineNotUpToDate), + MachinesNotUptoDate: collections.FromMachines(machineNotUpToDate), EtcdMembers: []*etcd.Member{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}, } diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 
c82e5a47c471..402da683713d 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -802,7 +802,11 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro if err != nil { return errors.Wrapf(err, "failed to update Machine: %s", klog.KObj(m)) } + // Note: Ensure ControlPlane has the latest version of the Machine. controlPlane.Machines[machineName] = updatedMachine + if _, ok := controlPlane.MachinesNotUptoDate[machineName]; ok { + controlPlane.MachinesNotUptoDate[machineName] = updatedMachine + } // Since the machine is updated, re-create the patch helper so that any subsequent // Patch calls use the correct base machine object to calculate the diffs. // Example: reconcileControlPlaneAndMachinesConditions patches the machine objects in a subsequent call diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 2be1a060fe67..41221af2a8e4 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -52,6 +52,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" @@ -520,7 +521,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -588,7 +589,7 @@ func 
TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -703,7 +704,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -761,7 +762,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: "test0", - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -1841,7 +1842,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { NodeVolumeDetachTimeoutSeconds: duration5s, NodeDeletionTimeoutSeconds: duration5s, }, - ReadinessGates: mandatoryMachineReadinessGates, + ReadinessGates: desiredstate.MandatoryMachineReadinessGates, }, } g.Expect(env.PatchAndWait(ctx, deletingMachine, client.FieldOwner(kcpManagerName))).To(Succeed()) @@ -3921,17 +3922,17 @@ func TestObjectsPendingDelete(t *testing.T) { // test utils. 
-func newFakeClient(initObjs ...client.Object) client.Client { +func newFakeClient(initObjs ...client.Object) client.WithWatch { return &fakeClient{ startTime: time.Now(), - Client: fake.NewClientBuilder().WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(), + WithWatch: fake.NewClientBuilder().WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(), } } type fakeClient struct { startTime time.Time mux sync.Mutex - client.Client + client.WithWatch } type fakeClientI interface { @@ -3947,7 +3948,7 @@ func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...clie f.SetCreationTimestamp(metav1.NewTime(c.startTime)) c.mux.Unlock() } - return c.Client.Create(ctx, obj, opts...) + return c.WithWatch.Create(ctx, obj, opts...) } func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, *unstructured.Unstructured) { @@ -4038,7 +4039,7 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), Annotations: map[string]string{}, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")), diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index a802d899a5f0..04fb74ba6d29 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -26,9 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" - 
"k8s.io/apimachinery/pkg/util/sets" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,8 +35,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/internal/contract" - topologynames "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" @@ -48,19 +45,6 @@ import ( "sigs.k8s.io/cluster-api/util/secret" ) -// mandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns. -var mandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ - {ConditionType: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition}, - {ConditionType: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition}, - {ConditionType: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition}, -} - -// etcdMandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns if etcd is managed. 
-var etcdMandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ - {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition}, - {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition}, -} - func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) @@ -182,61 +166,32 @@ func (r *KubeadmControlPlaneReconciler) reconcileExternalReference(ctx context.C return patchHelper.Patch(ctx, obj) } -func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, bootstrapSpec *bootstrapv1.KubeadmConfigSpec, failureDomain string) (*clusterv1.Machine, error) { +func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, isJoin bool, failureDomain string) (*clusterv1.Machine, error) { var errs []error - // Compute desired Machine - machine, err := r.computeDesiredMachine(kcp, cluster, failureDomain, nil) + machine, err := desiredstate.ComputeDesiredMachine(kcp, cluster, failureDomain, nil) if err != nil { - return nil, errors.Wrap(err, "failed to create Machine: failed to compute desired Machine") - } - - // Since the cloned resource should eventually have a controller ref for the Machine, we create an - // OwnerReference here without the Controller field set - infraCloneOwner := &metav1.OwnerReference{ - APIVersion: controlplanev1.GroupVersion.String(), - Kind: kubeadmControlPlaneKind, - Name: kcp.Name, - UID: kcp.UID, + return nil, errors.Wrap(err, "failed to create Machine") } - // Clone the infrastructure template - apiVersion, err := contract.GetAPIVersion(ctx, r.Client, kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind()) - if err != nil { - return nil, errors.Wrap(err, "failed to clone infrastructure 
template") - } - infraMachine, infraRef, err := external.CreateFromTemplate(ctx, &external.CreateFromTemplateInput{ - Client: r.Client, - TemplateRef: &corev1.ObjectReference{ - APIVersion: apiVersion, - Kind: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Kind, - Namespace: kcp.Namespace, - Name: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name, - }, - Namespace: kcp.Namespace, - Name: machine.Name, - OwnerRef: infraCloneOwner, - ClusterName: cluster.Name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), - Annotations: kcp.Spec.MachineTemplate.ObjectMeta.Annotations, - }) + infraMachine, infraRef, err := r.createInfraMachine(ctx, kcp, cluster, machine.Name) if err != nil { // Safe to return early here since no resources have been created yet. v1beta1conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedV1Beta1Condition, controlplanev1.InfrastructureTemplateCloningFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "%s", err.Error()) - return nil, errors.Wrap(err, "failed to clone infrastructure template") + return nil, errors.Wrap(err, "failed to create Machine") } machine.Spec.InfrastructureRef = infraRef // Clone the bootstrap configuration - bootstrapConfig, bootstrapRef, err := r.generateKubeadmConfig(ctx, kcp, cluster, bootstrapSpec, machine.Name) + bootstrapConfig, bootstrapRef, err := r.createKubeadmConfig(ctx, kcp, cluster, isJoin, machine.Name) if err != nil { v1beta1conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedV1Beta1Condition, controlplanev1.BootstrapTemplateCloningFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "%s", err.Error()) - errs = append(errs, errors.Wrap(err, "failed to generate bootstrap config")) + errs = append(errs, errors.Wrap(err, "failed to create Machine")) } - // Only proceed to generating the Machine if we haven't encountered an error + // Only proceed to creating the Machine if we haven't encountered an error if len(errs) == 0 { machine.Spec.Bootstrap.ConfigRef = bootstrapRef @@ 
-250,7 +205,7 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte // If we encountered any errors, attempt to clean up any dangling resources if len(errs) > 0 { if err := r.cleanupFromGeneration(ctx, infraMachine, bootstrapConfig); err != nil { - errs = append(errs, errors.Wrap(err, "failed to cleanup generated resources")) + errs = append(errs, errors.Wrap(err, "failed to cleanup created objects")) } return nil, kerrors.NewAggregate(errs) } @@ -273,34 +228,37 @@ func (r *KubeadmControlPlaneReconciler) cleanupFromGeneration(ctx context.Contex return kerrors.NewAggregate(errs) } -func (r *KubeadmControlPlaneReconciler) generateKubeadmConfig(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, spec *bootstrapv1.KubeadmConfigSpec, name string) (*bootstrapv1.KubeadmConfig, clusterv1.ContractVersionedObjectReference, error) { - // Create an owner reference without a controller reference because the owning controller is the machine controller - owner := metav1.OwnerReference{ - APIVersion: controlplanev1.GroupVersion.String(), - Kind: kubeadmControlPlaneKind, - Name: kcp.Name, - UID: kcp.UID, +func (r *KubeadmControlPlaneReconciler) createInfraMachine(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, name string) (*unstructured.Unstructured, clusterv1.ContractVersionedObjectReference, error) { + infraMachine, err := desiredstate.ComputeInfraMachine(ctx, r.Client, kcp, cluster, name, nil) + if err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") } - bootstrapConfig := &bootstrapv1.KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: kcp.Namespace, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), - Annotations: kcp.Spec.MachineTemplate.ObjectMeta.Annotations, - OwnerReferences: []metav1.OwnerReference{owner}, - }, - Spec: *spec, + if err := 
r.Client.Create(ctx, infraMachine); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") } - if err := r.Client.Create(ctx, bootstrapConfig); err != nil { - return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrap(err, "failed to create bootstrap configuration") + return infraMachine, clusterv1.ContractVersionedObjectReference{ + APIGroup: infraMachine.GroupVersionKind().Group, + Kind: infraMachine.GetKind(), + Name: infraMachine.GetName(), + }, nil +} + +func (r *KubeadmControlPlaneReconciler) createKubeadmConfig(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, isJoin bool, name string) (*bootstrapv1.KubeadmConfig, clusterv1.ContractVersionedObjectReference, error) { + kubeadmConfig, err := desiredstate.ComputeKubeadmConfig(kcp, cluster, isJoin, name, nil) + if err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") } - return bootstrapConfig, clusterv1.ContractVersionedObjectReference{ + if err := r.Client.Create(ctx, kubeadmConfig); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") + } + + return kubeadmConfig, clusterv1.ContractVersionedObjectReference{ APIGroup: bootstrapv1.GroupVersion.Group, Kind: "KubeadmConfig", - Name: bootstrapConfig.GetName(), + Name: kubeadmConfig.GetName(), }, nil } @@ -315,19 +273,16 @@ func (r *KubeadmControlPlaneReconciler) updateExternalObject(ctx context.Context updatedObject.SetUID(obj.GetUID()) // Update labels - updatedObject.SetLabels(internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name)) + updatedObject.SetLabels(desiredstate.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name)) // Update annotations - updatedObject.SetAnnotations(kcp.Spec.MachineTemplate.ObjectMeta.Annotations) + 
updatedObject.SetAnnotations(desiredstate.ControlPlaneMachineAnnotationsForCluster(kcp)) - if err := ssa.Patch(ctx, r.Client, kcpManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}); err != nil { - return errors.Wrapf(err, "failed to update %s", obj.GetObjectKind().GroupVersionKind().Kind) - } - return nil + return ssa.Patch(ctx, r.Client, kcpManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}) } func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) error { if err := ssa.Patch(ctx, r.Client, kcpManagerName, machine); err != nil { - return errors.Wrap(err, "failed to create Machine") + return err } // Remove the annotation tracking that a remediation is in progress (the remediation completed when // the replacement machine has been created above). @@ -336,148 +291,14 @@ func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp * } func (r *KubeadmControlPlaneReconciler) updateMachine(ctx context.Context, machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) (*clusterv1.Machine, error) { - updatedMachine, err := r.computeDesiredMachine(kcp, cluster, machine.Spec.FailureDomain, machine) + updatedMachine, err := desiredstate.ComputeDesiredMachine(kcp, cluster, machine.Spec.FailureDomain, machine) if err != nil { - return nil, errors.Wrap(err, "failed to update Machine: failed to compute desired Machine") + return nil, errors.Wrap(err, "failed to apply Machine") } err = ssa.Patch(ctx, r.Client, kcpManagerName, updatedMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: machine}) if err != nil { - return nil, errors.Wrap(err, "failed to update Machine") + return nil, err } return updatedMachine, nil } - -// kubeadmClusterConfigurationAnnotation is an annotation that was set in Cluster API <= v1.11. 
-// Starting with Cluster API v1.12 we remove it from existing Machines. -// -// Deprecated: This constant and corresponding cleanup code can be removed once we don't support upgrades from Cluster API v1.12 anymore. -const kubeadmClusterConfigurationAnnotation = "controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration" - -// computeDesiredMachine computes the desired Machine. -// This Machine will be used during reconciliation to: -// * create a new Machine -// * update an existing Machine -// Because we are using Server-Side-Apply we always have to calculate the full object. -// There are small differences in how we calculate the Machine depending on if it -// is a create or update. Example: for a new Machine we have to calculate a new name, -// while for an existing Machine we have to use the name of the existing Machine. -func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, failureDomain string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { - var machineName string - var machineUID types.UID - var version string - annotations := map[string]string{} - if existingMachine == nil { - // Creating a new machine - nameTemplate := "{{ .kubeadmControlPlane.name }}-{{ .random }}" - if kcp.Spec.MachineNaming.Template != "" { - nameTemplate = kcp.Spec.MachineNaming.Template - if !strings.Contains(nameTemplate, "{{ .random }}") { - return nil, errors.New("cannot generate Machine name: {{ .random }} is missing in machineNaming.template") - } - } - generatedMachineName, err := topologynames.KCPMachineNameGenerator(nameTemplate, cluster.Name, kcp.Name).GenerateName() - if err != nil { - return nil, errors.Wrap(err, "failed to generate Machine name") - } - machineName = generatedMachineName - version = kcp.Spec.Version - - // In case this machine is being created as a consequence of a remediation, then add an annotation - // tracking remediating data. 
- // NOTE: This is required in order to track remediation retries. - if remediationData, ok := kcp.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { - annotations[controlplanev1.RemediationForAnnotation] = remediationData - } - } else { - // Updating an existing machine - machineName = existingMachine.Name - machineUID = existingMachine.UID - version = existingMachine.Spec.Version - - // Cleanup the KubeadmClusterConfigurationAnnotation annotation that was set in Cluster API <= v1.11. - delete(annotations, kubeadmClusterConfigurationAnnotation) - - // If the machine already has remediation data then preserve it. - // NOTE: This is required in order to track remediation retries. - if remediationData, ok := existingMachine.Annotations[controlplanev1.RemediationForAnnotation]; ok { - annotations[controlplanev1.RemediationForAnnotation] = remediationData - } - } - // Setting pre-terminate hook so we can later remove the etcd member right before Machine termination - // (i.e. before InfraMachine deletion). - annotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - - // Construct the basic Machine. - desiredMachine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Machine", - }, - ObjectMeta: metav1.ObjectMeta{ - UID: machineUID, - Name: machineName, - Namespace: kcp.Namespace, - // Note: by setting the ownerRef on creation we signal to the Machine controller that this is not a stand-alone Machine. - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)), - }, - Labels: map[string]string{}, - Annotations: map[string]string{}, - }, - Spec: clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: version, - FailureDomain: failureDomain, - }, - } - - // Set the in-place mutable fields. - // When we create a new Machine we will just create the Machine with those fields. 
- // When we update an existing Machine will we update the fields on the existing Machine (in-place mutate). - - // Set labels - desiredMachine.Labels = internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name) - - // Set annotations - // Add the annotations from the MachineTemplate. - // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. - for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { - desiredMachine.Annotations[k] = v - } - for k, v := range annotations { - desiredMachine.Annotations[k] = v - } - - // Set other in-place mutable fields - desiredMachine.Spec.Deletion.NodeDrainTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds - desiredMachine.Spec.Deletion.NodeDeletionTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds - desiredMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds - - // Note: We intentionally don't set "minReadySeconds" on Machines because we consider it enough to have machine availability driven by readiness of control plane components. - if existingMachine != nil { - desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef - desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef - } - - // Set machines readiness gates - allReadinessGates := []clusterv1.MachineReadinessGate{} - allReadinessGates = append(allReadinessGates, mandatoryMachineReadinessGates...) - isEtcdManaged := !kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External.IsDefined() - if isEtcdManaged { - allReadinessGates = append(allReadinessGates, etcdMandatoryMachineReadinessGates...) - } - allReadinessGates = append(allReadinessGates, kcp.Spec.MachineTemplate.Spec.ReadinessGates...) 
- - desiredMachine.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{} - knownGates := sets.Set[string]{} - for _, gate := range allReadinessGates { - if knownGates.Has(gate.ConditionType) { - continue - } - desiredMachine.Spec.ReadinessGates = append(desiredMachine.Spec.ReadinessGates, gate) - knownGates.Insert(gate.ConditionType) - } - - return desiredMachine, nil -} diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index cf9867f7e99c..cfd3a3b0ec05 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -17,19 +17,21 @@ limitations under the License. package controllers import ( - "fmt" + "context" "testing" . "github.com/onsi/gomega" - gomegatypes "github.com/onsi/gomega/types" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" @@ -350,6 +352,18 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { UID: "abc-123-kcp-control-plane", }, Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), + }, + }, + }, + }, + }, MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ Spec: 
controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ @@ -372,8 +386,7 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { recorder: record.NewFakeRecorder(32), } - bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{} - _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, "") + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") g.Expect(err).To(Succeed()) machineList := &clusterv1.MachineList{} @@ -385,12 +398,6 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { g.Expect(m.Namespace).To(Equal(cluster.Namespace)) g.Expect(m.Name).NotTo(BeEmpty()) g.Expect(m.Name).To(HavePrefix(kcp.Name + namingTemplateKey)) - - infraObj, err := external.GetObjectFromContractVersionedRef(ctx, r.Client, m.Spec.InfrastructureRef, m.Namespace) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) - g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) - g.Expect(m.Spec.InfrastructureRef.Name).To(Equal(m.Name)) g.Expect(m.Spec.InfrastructureRef.APIGroup).To(Equal(genericInfrastructureMachineTemplate.GroupVersionKind().Group)) g.Expect(m.Spec.InfrastructureRef.Kind).To(Equal("GenericInfrastructureMachine")) @@ -398,10 +405,35 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { g.Expect(m.Spec.Bootstrap.ConfigRef.Name).To(Equal(m.Name)) g.Expect(m.Spec.Bootstrap.ConfigRef.APIGroup).To(Equal(bootstrapv1.GroupVersion.Group)) g.Expect(m.Spec.Bootstrap.ConfigRef.Kind).To(Equal("KubeadmConfig")) + + infraObj, err := external.GetObjectFromContractVersionedRef(ctx, env.GetAPIReader(), m.Spec.InfrastructureRef, m.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + 
g.Expect(infraObj.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(infraObj.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + })) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) + + kubeadmConfig := &bootstrapv1.KubeadmConfig{} + err = env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(kubeadmConfig.OwnerReferences).To(HaveLen(1)) + g.Expect(kubeadmConfig.OwnerReferences).To(ContainElement(metav1.OwnerReference{ + Kind: "KubeadmControlPlane", + APIVersion: controlplanev1.GroupVersion.String(), + Name: kcp.Name, + UID: kcp.UID, + })) + g.Expect(kubeadmConfig.Spec.InitConfiguration).To(BeComparableTo(bootstrapv1.InitConfiguration{})) + g.Expect(kubeadmConfig.Spec.JoinConfiguration).To(BeComparableTo(kcp.Spec.KubeadmConfigSpec.JoinConfiguration)) } } -func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { +func TestCloneConfigsAndGenerateMachineFailInfraMachineCreation(t *testing.T) { g := NewWithT(t) cluster := &clusterv1.Cluster{ @@ -456,547 +488,178 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { recorder: record.NewFakeRecorder(32), } - bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{}, - } - - // Try to break Infra Cloning + // Break InfraMachine cloning kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name = "something_invalid" - _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, "") + _, err := r.cloneConfigsAndGenerateMachine(ctx, 
cluster, kcp, true, "") g.Expect(err).To(HaveOccurred()) g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ Type: controlplanev1.MachinesCreatedV1Beta1Condition, Status: corev1.ConditionFalse, Severity: clusterv1.ConditionSeverityError, Reason: controlplanev1.InfrastructureTemplateCloningFailedV1Beta1Reason, - Message: "failed to retrieve GenericInfrastructureMachineTemplate default/something_invalid: genericinfrastructuremachinetemplates.infrastructure.cluster.x-k8s.io \"something_invalid\" not found", + Message: "failed to create InfraMachine: failed to compute desired InfraMachine: failed to retrieve GenericInfrastructureMachineTemplate default/something_invalid: genericinfrastructuremachinetemplates.infrastructure.cluster.x-k8s.io \"something_invalid\" not found", })) + // No objects should exist. + machineList := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(BeEmpty()) + infraMachineList := &unstructured.UnstructuredList{} + infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: builder.InfrastructureGroupVersion.Group, + Version: builder.InfrastructureGroupVersion.Version, + Kind: builder.GenericInfrastructureMachineKind, + }) + g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(infraMachineList.Items).To(BeEmpty()) + kubeadmConfigList := &bootstrapv1.KubeadmConfigList{} + g.Expect(fakeClient.List(ctx, kubeadmConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(kubeadmConfigList.Items).To(BeEmpty()) } -func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { - namingTemplateKey := "-kcp" - kcpName := "testControlPlane" - clusterName := "testCluster" +func TestCloneConfigsAndGenerateMachineFailKubeadmConfigCreation(t *testing.T) { + g := NewWithT(t) cluster := &clusterv1.Cluster{ 
ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, + Name: "foo", Namespace: metav1.NamespaceDefault, }, } - duration5s := ptr.To(int32(5)) - duration10s := ptr.To(int32(10)) - kcpMachineTemplateObjectMeta := clusterv1.ObjectMeta{ - Labels: map[string]string{ - "machineTemplateLabel": "machineTemplateLabelValue", - }, - Annotations: map[string]string{ - "machineTemplateAnnotation": "machineTemplateAnnotationValue", - }, - } - kcpMachineTemplateObjectMetaCopy := kcpMachineTemplateObjectMeta.DeepCopy() - infraRef := &clusterv1.ContractVersionedObjectReference{ - Kind: "InfraKind", - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Name: "infra", - } - bootstrapRef := clusterv1.ContractVersionedObjectReference{ - Kind: "BootstrapKind", - APIGroup: clusterv1.GroupVersionBootstrap.Group, - Name: "bootstrap", - } - - tests := []struct { - name string - kcp *controlplanev1.KubeadmControlPlane - isUpdatingExistingMachine bool - want []gomegatypes.GomegaMatcher - wantErr bool - }{ - { - name: "should return the correct Machine object when creating a new Machine", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ 
.kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", - }, - }, - }, - isUpdatingExistingMachine: false, - want: []gomegatypes.GomegaMatcher{ - HavePrefix(kcpName + namingTemplateKey), - Not(HaveSuffix("00000")), - }, - wantErr: false, - }, - { - name: "should return error when creating a new Machine when '.random' is not added in template", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey, - }, - }, - }, - isUpdatingExistingMachine: false, - wantErr: true, - }, - { - name: "should not return error when creating a new Machine when the generated name exceeds 63", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - 
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .random }}" + fmt.Sprintf("%059d", 0), - }, - }, - }, - isUpdatingExistingMachine: false, - want: []gomegatypes.GomegaMatcher{ - ContainSubstring(fmt.Sprintf("%053d", 0)), - Not(HaveSuffix("00000")), - }, - wantErr: false, - }, - { - name: "should return error when creating a new Machine with invalid template", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "some-hardcoded-name-{{ .doesnotexistindata }}-{{ .random }}", // invalid template - }, - }, - }, - isUpdatingExistingMachine: false, - wantErr: true, - }, - { - name: "should return the correct Machine object when creating a new Machine with default templated name", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: 
controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - }, - }, - isUpdatingExistingMachine: false, - wantErr: false, - want: []gomegatypes.GomegaMatcher{ - HavePrefix(kcpName), - Not(HaveSuffix("00000")), - }, - }, - { - name: "should return the correct Machine object when creating a new Machine with additional kcp readinessGates", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Bar", - }, - }, - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - }, + genericMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineTemplateKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "infra-foo", + "namespace": cluster.Namespace, }, - isUpdatingExistingMachine: false, - wantErr: false, - }, - { - name: "should return the correct Machine object when updating an existing Machine (empty ClusterConfiguration annotation)", - kcp: 
&controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "hello": "world", }, }, }, - isUpdatingExistingMachine: true, - wantErr: false, }, - { - name: "should return the correct Machine object when updating an existing Machine (outdated ClusterConfiguration annotation)", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - }, - }, - KubeadmConfigSpec: 
bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", - }, - }, - }, - isUpdatingExistingMachine: true, - wantErr: false, + } + + kcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kcp-foo", + Namespace: cluster.Namespace, }, - { - name: "should return the correct Machine object when updating an existing Machine (up to date ClusterConfiguration annotation)", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: genericMachineTemplate.GetKind(), + APIGroup: genericMachineTemplate.GroupVersionKind().Group, + Name: genericMachineTemplate.GetName(), }, }, }, - 
isUpdatingExistingMachine: true, - wantErr: false, + Version: "v1.16.6", }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - var desiredMachine *clusterv1.Machine - failureDomain := "fd-1" - var expectedMachineSpec clusterv1.MachineSpec - var err error - - if tt.isUpdatingExistingMachine { - machineName := "existing-machine" - machineUID := types.UID("abc-123-existing-machine") - // Use different ClusterConfiguration string than the information present in KCP - // to verify that for an existing machine we do not override this information. - remediationData := "remediation-data" - machineVersion := "v1.25.3" - existingMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: machineName, - UID: machineUID, - Annotations: map[string]string{ - controlplanev1.RemediationForAnnotation: remediationData, - }, - }, - Spec: clusterv1.MachineSpec{ - Version: machineVersion, - FailureDomain: failureDomain, - Deletion: clusterv1.MachineDeletionSpec{ - NodeDrainTimeoutSeconds: duration10s, - NodeDeletionTimeoutSeconds: duration10s, - NodeVolumeDetachTimeoutSeconds: duration10s, - }, - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: bootstrapRef, - }, - InfrastructureRef: *infraRef, - ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "Foo"}}, - }, - } - - desiredMachine, err = (&KubeadmControlPlaneReconciler{}).computeDesiredMachine( - tt.kcp, cluster, - existingMachine.Spec.FailureDomain, existingMachine, - ) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - expectedMachineSpec = clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: machineVersion, // Should use the Machine version and not the version from KCP. 
- Bootstrap: clusterv1.Bootstrap{ - ConfigRef: bootstrapRef, - }, - InfrastructureRef: *infraRef, - FailureDomain: failureDomain, - Deletion: clusterv1.MachineDeletionSpec{ - NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, - NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, - NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, - }, - ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), - } - - // Verify the Name and UID of the Machine remain unchanged - g.Expect(desiredMachine.Name).To(Equal(machineName)) - g.Expect(desiredMachine.UID).To(Equal(machineUID)) - // Verify annotations. - expectedAnnotations := map[string]string{} - for k, v := range kcpMachineTemplateObjectMeta.Annotations { - expectedAnnotations[k] = v - } - expectedAnnotations[controlplanev1.RemediationForAnnotation] = remediationData - // The pre-terminate annotation should always be added - expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) - } else { - desiredMachine, err = (&KubeadmControlPlaneReconciler{}).computeDesiredMachine( - tt.kcp, cluster, - failureDomain, nil, - ) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - - expectedMachineSpec = clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: tt.kcp.Spec.Version, - FailureDomain: failureDomain, - Deletion: clusterv1.MachineDeletionSpec{ - NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, - NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, - NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, - }, - 
ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), - } - // Verify Name. - for _, matcher := range tt.want { - g.Expect(desiredMachine.Name).To(matcher) - } - // Verify annotations. - expectedAnnotations := map[string]string{} - for k, v := range kcpMachineTemplateObjectMeta.Annotations { - expectedAnnotations[k] = v - } - // The pre-terminate annotation should always be added - expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) - } - - g.Expect(desiredMachine.Namespace).To(Equal(tt.kcp.Namespace)) - g.Expect(desiredMachine.OwnerReferences).To(HaveLen(1)) - g.Expect(desiredMachine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(tt.kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))) - g.Expect(desiredMachine.Spec).To(BeComparableTo(expectedMachineSpec)) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy(), builder.GenericInfrastructureMachineTemplateCRD) - // Verify that the machineTemplate.ObjectMeta has been propagated to the Machine. - // Verify labels. - expectedLabels := map[string]string{} - for k, v := range kcpMachineTemplateObjectMeta.Labels { - expectedLabels[k] = v - } - expectedLabels[clusterv1.ClusterNameLabel] = cluster.Name - expectedLabels[clusterv1.MachineControlPlaneLabel] = "" - expectedLabels[clusterv1.MachineControlPlaneNameLabel] = tt.kcp.Name - g.Expect(desiredMachine.Labels).To(Equal(expectedLabels)) - - // Verify that machineTemplate.ObjectMeta in KCP has not been modified. 
- g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Labels).To(Equal(kcpMachineTemplateObjectMetaCopy.Labels)) - g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Annotations).To(Equal(kcpMachineTemplateObjectMetaCopy.Annotations)) - }) + r := &KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } + + // Break KubeadmConfig computation + kcp.Spec.Version = "something_invalid" + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") + g.Expect(err).To(HaveOccurred()) + g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ + Type: controlplanev1.MachinesCreatedV1Beta1Condition, + Status: corev1.ConditionFalse, + Severity: clusterv1.ConditionSeverityError, + Reason: controlplanev1.BootstrapTemplateCloningFailedV1Beta1Reason, + Message: "failed to create KubeadmConfig: failed to compute desired KubeadmConfig: failed to parse Kubernetes version \"something_invalid\": Invalid character(s) found in major number \"0something_invalid\"", + })) + // No objects should exist. 
+ machineList := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(BeEmpty()) + infraMachineList := &unstructured.UnstructuredList{} + infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: builder.InfrastructureGroupVersion.Group, + Version: builder.InfrastructureGroupVersion.Version, + Kind: builder.GenericInfrastructureMachineKind, + }) + g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(infraMachineList.Items).To(BeEmpty()) + kubeadmConfigList := &bootstrapv1.KubeadmConfigList{} + g.Expect(fakeClient.List(ctx, kubeadmConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(kubeadmConfigList.Items).To(BeEmpty()) } -func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) { +func TestCloneConfigsAndGenerateMachineFailMachineCreation(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient() cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: "testCluster", + Name: "foo", Namespace: metav1.NamespaceDefault, }, } + genericMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineTemplateKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "infra-foo", + "namespace": cluster.Namespace, + }, + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + }, + }, + } + kcp := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ - Name: "testControlPlane", + Name: "kcp-foo", Namespace: cluster.Namespace, }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: 
clusterv1.ContractVersionedObjectReference{ + Kind: genericMachineTemplate.GetKind(), + APIGroup: genericMachineTemplate.GroupVersionKind().Group, + Name: genericMachineTemplate.GetName(), + }, + }, + }, + Version: "v1.16.6", + }, } - spec := bootstrapv1.KubeadmConfigSpec{} - expectedReferenceKind := "KubeadmConfig" - expectedReferenceAPIGroup := bootstrapv1.GroupVersion.Group - expectedOwner := metav1.OwnerReference{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - Name: kcp.Name, - } + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy(), builder.GenericInfrastructureMachineTemplateCRD) + // Break Machine creation by injecting an error into the Machine apply call. + fakeClient = interceptor.NewClient(fakeClient, interceptor.Funcs{ + Apply: func(ctx context.Context, c client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + clientObject, ok := obj.(client.Object) + if !ok { + return errors.Errorf("error during Machine creation: unexpected ApplyConfiguration") + } + if clientObject.GetObjectKind().GroupVersionKind().Kind == "Machine" { + return errors.Errorf("fake error during Machine creation") + } + return c.Apply(ctx, obj, opts...) 
+ }, + }) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -1004,19 +667,30 @@ func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) { recorder: record.NewFakeRecorder(32), } - _, got, err := r.generateKubeadmConfig(ctx, kcp, cluster, spec.DeepCopy(), "kubeadmconfig-name") - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).NotTo(BeNil()) - g.Expect(got.Name).To(Equal("kubeadmconfig-name")) - g.Expect(got.Kind).To(Equal(expectedReferenceKind)) - g.Expect(got.APIGroup).To(Equal(expectedReferenceAPIGroup)) - - bootstrapConfig := &bootstrapv1.KubeadmConfig{} - key := client.ObjectKey{Name: got.Name, Namespace: kcp.Namespace} - g.Expect(fakeClient.Get(ctx, key, bootstrapConfig)).To(Succeed()) - g.Expect(bootstrapConfig.OwnerReferences).To(HaveLen(1)) - g.Expect(bootstrapConfig.OwnerReferences).To(ContainElement(expectedOwner)) - g.Expect(bootstrapConfig.Spec).To(BeComparableTo(spec)) + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") + g.Expect(err).To(HaveOccurred()) + g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ + Type: controlplanev1.MachinesCreatedV1Beta1Condition, + Status: corev1.ConditionFalse, + Severity: clusterv1.ConditionSeverityError, + Reason: controlplanev1.MachineGenerationFailedV1Beta1Reason, + Message: "failed to apply Machine: fake error during Machine creation", + })) + // No objects should exist. 
+ machineList := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(BeEmpty()) + infraMachineList := &unstructured.UnstructuredList{} + infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: builder.InfrastructureGroupVersion.Group, + Version: builder.InfrastructureGroupVersion.Version, + Kind: builder.GenericInfrastructureMachineKind, + }) + g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(infraMachineList.Items).To(BeEmpty()) + kubeadmConfigList := &bootstrapv1.KubeadmConfigList{} + g.Expect(fakeClient.List(ctx, kubeadmConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(kubeadmConfigList.Items).To(BeEmpty()) } func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index f290d05ad800..e153086fc923 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -21,7 +21,6 @@ import ( "fmt" "strings" - "github.com/blang/semver/v4" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -41,20 +40,12 @@ import ( func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - bootstrapSpec := controlPlane.InitialControlPlaneConfig() - - parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) - } - internal.DefaultFeatureGates(bootstrapSpec, parsedVersion) - fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) if err != nil { return ctrl.Result{}, err } - 
newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd) + newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, false, fd) if err != nil { log.Error(err, "Failed to create initial control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) @@ -79,21 +70,12 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, return result, err } - // Create the bootstrap configuration - bootstrapSpec := controlPlane.JoinControlPlaneConfig() - - parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) - } - internal.DefaultFeatureGates(bootstrapSpec, parsedVersion) - fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) if err != nil { return ctrl.Result{}, err } - newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd) + newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, true, fd) if err != nil { log.Error(err, "Failed to create additional control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster % control plane: %v", klog.KObj(controlPlane.Cluster), err) diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index f8606fc71968..7650de735267 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -36,6 +36,7 @@ import ( controlplanev1 
"sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" @@ -106,7 +107,7 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { kubeadmConfig := &bootstrapv1.KubeadmConfig{} bootstrapRef := machineList.Items[0].Spec.Bootstrap.ConfigRef g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: machineList.Items[0].Namespace, Name: bootstrapRef.Name}, kubeadmConfig)).To(Succeed()) - g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{internal.ControlPlaneKubeletLocalMode: true})) + g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{desiredstate.ControlPlaneKubeletLocalMode: true})) } func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { @@ -174,7 +175,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { kubeadmConfig := &bootstrapv1.KubeadmConfig{} bootstrapRef := controlPlaneMachines.Items[0].Spec.Bootstrap.ConfigRef g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: controlPlaneMachines.Items[0].Namespace, Name: bootstrapRef.Name}, kubeadmConfig)).To(Succeed()) - g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{internal.ControlPlaneKubeletLocalMode: true})) + g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{desiredstate.ControlPlaneKubeletLocalMode: true})) }) t.Run("does not create a control plane Machine if preflight checks fail", func(t *testing.T) { setup := func(t *testing.T, g *WithT) *corev1.Namespace { diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go 
b/controlplane/kubeadm/internal/controllers/upgrade_test.go index c841da58c0c2..61b6bc0bde34 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade_test.go +++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go @@ -34,6 +34,7 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" @@ -204,7 +205,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ diff --git a/controlplane/kubeadm/internal/desiredstate/desired_state.go b/controlplane/kubeadm/internal/desiredstate/desired_state.go new file mode 100644 index 000000000000..bfbc1543191a --- /dev/null +++ b/controlplane/kubeadm/internal/desiredstate/desired_state.go @@ -0,0 +1,329 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package desiredstate contains utils to compute the desired state of a Machine. 
+package desiredstate + +import ( + "context" + "strings" + + "github.com/blang/semver/v4" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/internal/contract" + topologynames "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/labels/format" + "sigs.k8s.io/cluster-api/util/version" +) + +var ( + // minKubernetesVersionControlPlaneKubeletLocalMode is the min version from which + // we will enable the ControlPlaneKubeletLocalMode kubeadm feature gate. + // Note: We have to do this with Kubernetes 1.31. Because with that version we encountered + // a case where it's not okay anymore to ignore the Kubernetes version skew (kubelet 1.31 uses + // the spec.clusterIP field selector that is only implemented in kube-apiserver >= 1.31.0). + minKubernetesVersionControlPlaneKubeletLocalMode = semver.MustParse("1.31.0") + + // ControlPlaneKubeletLocalMode is a feature gate of kubeadm that ensures + // kubelets only communicate with the local apiserver. + ControlPlaneKubeletLocalMode = "ControlPlaneKubeletLocalMode" +) + +// MandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns. 
+var MandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ + {ConditionType: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition}, + {ConditionType: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition}, + {ConditionType: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition}, +} + +// etcdMandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns if etcd is managed. +var etcdMandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ + {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition}, + {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition}, +} + +// kubeadmClusterConfigurationAnnotation is an annotation that was set in Cluster API <= v1.11. +// Starting with Cluster API v1.12 we remove it from existing Machines. +// +// Deprecated: This constant and corresponding cleanup code can be removed once we don't support upgrades from Cluster API v1.12 anymore. +const kubeadmClusterConfigurationAnnotation = "controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration" + +// ComputeDesiredMachine computes the desired Machine. +// This Machine will be used during reconciliation to: +// * create a new Machine +// * update an existing Machine +// Because we are using Server-Side-Apply we always have to calculate the full object. +// There are small differences in how we calculate the Machine depending on if it +// is a create or update. Example: for a new Machine we have to calculate a new name, +// while for an existing Machine we have to use the name of the existing Machine. 
+func ComputeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, failureDomain string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { + var machineName string + var machineUID types.UID + var version string + annotations := map[string]string{} + if existingMachine == nil { + // Creating a new machine + nameTemplate := "{{ .kubeadmControlPlane.name }}-{{ .random }}" + if kcp.Spec.MachineNaming.Template != "" { + nameTemplate = kcp.Spec.MachineNaming.Template + if !strings.Contains(nameTemplate, "{{ .random }}") { + return nil, errors.New("failed to compute desired Machine: cannot generate Machine name: {{ .random }} is missing in machineNaming.template") + } + } + generatedMachineName, err := topologynames.KCPMachineNameGenerator(nameTemplate, cluster.Name, kcp.Name).GenerateName() + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired Machine: failed to generate Machine name") + } + machineName = generatedMachineName + version = kcp.Spec.Version + + // In case this machine is being created as a consequence of a remediation, then add an annotation + // tracking remediating data. + // NOTE: This is required in order to track remediation retries. + if remediationData, ok := kcp.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { + annotations[controlplanev1.RemediationForAnnotation] = remediationData + } + } else { + // Updating an existing machine + machineName = existingMachine.Name + machineUID = existingMachine.UID + version = existingMachine.Spec.Version + + // Cleanup the KubeadmClusterConfigurationAnnotation annotation that was set in Cluster API <= v1.11. + delete(annotations, kubeadmClusterConfigurationAnnotation) + + // If the machine already has remediation data then preserve it. + // NOTE: This is required in order to track remediation retries. 
+ if remediationData, ok := existingMachine.Annotations[controlplanev1.RemediationForAnnotation]; ok { + annotations[controlplanev1.RemediationForAnnotation] = remediationData + } + } + // Setting pre-terminate hook so we can later remove the etcd member right before Machine termination + // (i.e. before InfraMachine deletion). + annotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" + + // Construct the basic Machine. + desiredMachine := &clusterv1.Machine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + UID: machineUID, + Name: machineName, + Namespace: kcp.Namespace, + // Note: by setting the ownerRef on creation we signal to the Machine controller that this is not a stand-alone Machine. + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: version, + FailureDomain: failureDomain, + }, + } + + // Set the in-place mutable fields. + // When we create a new Machine we will just create the Machine with those fields. + // When we update an existing Machine will we update the fields on the existing Machine (in-place mutate). 
+ + // Set labels + desiredMachine.Labels = ControlPlaneMachineLabelsForCluster(kcp, cluster.Name) + + // Set annotations + desiredMachine.Annotations = ControlPlaneMachineAnnotationsForCluster(kcp) + for k, v := range annotations { + desiredMachine.Annotations[k] = v + } + + // Set other in-place mutable fields + desiredMachine.Spec.Deletion.NodeDrainTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds + desiredMachine.Spec.Deletion.NodeDeletionTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds + desiredMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds + + // Note: We intentionally don't set "minReadySeconds" on Machines because we consider it enough to have machine availability driven by readiness of control plane components. + if existingMachine != nil { + desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef + desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef + } + + // Set machines readiness gates + allReadinessGates := []clusterv1.MachineReadinessGate{} + allReadinessGates = append(allReadinessGates, MandatoryMachineReadinessGates...) + isEtcdManaged := !kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External.IsDefined() + if isEtcdManaged { + allReadinessGates = append(allReadinessGates, etcdMandatoryMachineReadinessGates...) + } + allReadinessGates = append(allReadinessGates, kcp.Spec.MachineTemplate.Spec.ReadinessGates...) + + desiredMachine.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{} + knownGates := sets.Set[string]{} + for _, gate := range allReadinessGates { + if knownGates.Has(gate.ConditionType) { + continue + } + desiredMachine.Spec.ReadinessGates = append(desiredMachine.Spec.ReadinessGates, gate) + knownGates.Insert(gate.ConditionType) + } + + return desiredMachine, nil +} + +// ComputeKubeadmConfig computes the desired KubeadmConfig. 
+func ComputeKubeadmConfig(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, isJoin bool, name string, existingKubeadmConfig *bootstrapv1.KubeadmConfig) (*bootstrapv1.KubeadmConfig, error) { + // Create an owner reference without a controller reference because the owning controller is the machine controller + var ownerReferences []metav1.OwnerReference + if existingKubeadmConfig == nil || !util.HasOwner(existingKubeadmConfig.OwnerReferences, clusterv1.GroupVersion.String(), []string{"Machine"}) { + ownerReferences = append(ownerReferences, metav1.OwnerReference{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + }) + } + + spec := kcp.Spec.KubeadmConfigSpec.DeepCopy() + if isJoin { + // Note: When building a KubeadmConfig for a joining CP machine empty out the unnecessary InitConfiguration. + spec.InitConfiguration = bootstrapv1.InitConfiguration{} + // NOTE: For the joining we are preserving the ClusterConfiguration in order to determine if the + // cluster is using an external etcd in the kubeadm bootstrap provider (even if this is not required by kubeadm Join). + } else { + // Note: When building a KubeadmConfig for the first CP machine empty out the unnecessary JoinConfiguration. + spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} + } + + parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) + if err != nil { + return nil, errors.Wrapf(err, "failed to compute desired KubeadmConfig: failed to parse Kubernetes version %q", kcp.Spec.Version) + } + DefaultFeatureGates(spec, parsedVersion) + + return &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: kcp.Namespace, + Labels: ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Annotations: ControlPlaneMachineAnnotationsForCluster(kcp), + OwnerReferences: ownerReferences, + }, + Spec: *spec, + }, nil +} + +// ComputeInfraMachine computes the desired InfraMachine. 
+func ComputeInfraMachine(ctx context.Context, c client.Client, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, name string, existingInfraMachine *unstructured.Unstructured) (*unstructured.Unstructured, error) { + // Create an owner reference without a controller reference because the owning controller is the machine controller + var ownerReference *metav1.OwnerReference + if existingInfraMachine == nil || !util.HasOwner(existingInfraMachine.GetOwnerReferences(), clusterv1.GroupVersion.String(), []string{"Machine"}) { + ownerReference = &metav1.OwnerReference{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + } + } + + apiVersion, err := contract.GetAPIVersion(ctx, c, kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind()) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired InfraMachine") + } + templateRef := &corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Kind, + Namespace: kcp.Namespace, + Name: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name, + } + + template, err := external.Get(ctx, c, templateRef) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired InfraMachine") + } + generateTemplateInput := &external.GenerateTemplateInput{ + Template: template, + TemplateRef: templateRef, + Namespace: kcp.Namespace, + Name: name, + ClusterName: cluster.Name, + OwnerRef: ownerReference, + Labels: ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Annotations: ControlPlaneMachineAnnotationsForCluster(kcp), + } + infraMachine, err := external.GenerateTemplate(generateTemplateInput) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired InfraMachine") + } + return infraMachine, nil +} + +// DefaultFeatureGates defaults the feature gates field. 
+func DefaultFeatureGates(kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec, kubernetesVersion semver.Version) { + if version.Compare(kubernetesVersion, minKubernetesVersionControlPlaneKubeletLocalMode, version.WithoutPreReleases()) < 0 { + return + } + + if kubeadmConfigSpec.ClusterConfiguration.FeatureGates == nil { + kubeadmConfigSpec.ClusterConfiguration.FeatureGates = map[string]bool{} + } + + if _, ok := kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode]; !ok { + kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode] = true + } +} + +// ControlPlaneMachineLabelsForCluster returns a set of labels to add to a control plane machine for this specific cluster. +func ControlPlaneMachineLabelsForCluster(kcp *controlplanev1.KubeadmControlPlane, clusterName string) map[string]string { + labels := map[string]string{} + + // Add the labels from the MachineTemplate. + // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. + for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Labels { + labels[k] = v + } + + // Always force these labels over the ones coming from the spec. + labels[clusterv1.ClusterNameLabel] = clusterName + labels[clusterv1.MachineControlPlaneLabel] = "" + // Note: MustFormatValue is used here as the label value can be a hash if the control plane name is longer than 63 characters. + labels[clusterv1.MachineControlPlaneNameLabel] = format.MustFormatValue(kcp.Name) + return labels +} + +// ControlPlaneMachineAnnotationsForCluster returns a set of annotations to add to a control plane machine for this specific cluster. +func ControlPlaneMachineAnnotationsForCluster(kcp *controlplanev1.KubeadmControlPlane) map[string]string { + annotations := map[string]string{} + + // Add the annotations from the MachineTemplate. + // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. 
+ for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { + annotations[k] = v + } + + return annotations +} diff --git a/controlplane/kubeadm/internal/desiredstate/desired_state_test.go b/controlplane/kubeadm/internal/desiredstate/desired_state_test.go new file mode 100644 index 000000000000..7eb1ba37d964 --- /dev/null +++ b/controlplane/kubeadm/internal/desiredstate/desired_state_test.go @@ -0,0 +1,884 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package desiredstate + +import ( + "fmt" + "testing" + + "github.com/blang/semver/v4" + . 
"github.com/onsi/gomega" + gomegatypes "github.com/onsi/gomega/types" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/test/builder" +) + +func Test_ComputeDesiredMachine(t *testing.T) { + namingTemplateKey := "-kcp" + kcpName := "testControlPlane" + clusterName := "testCluster" + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: metav1.NamespaceDefault, + }, + } + duration5s := ptr.To(int32(5)) + duration10s := ptr.To(int32(10)) + kcpMachineTemplateObjectMeta := clusterv1.ObjectMeta{ + Labels: map[string]string{ + "machineTemplateLabel": "machineTemplateLabelValue", + }, + Annotations: map[string]string{ + "machineTemplateAnnotation": "machineTemplateAnnotationValue", + }, + } + kcpMachineTemplateObjectMetaCopy := kcpMachineTemplateObjectMeta.DeepCopy() + + infraRef := &clusterv1.ContractVersionedObjectReference{ + Kind: "InfraKind", + APIGroup: clusterv1.GroupVersionInfrastructure.Group, + Name: "infra", + } + bootstrapRef := clusterv1.ContractVersionedObjectReference{ + Kind: "BootstrapKind", + APIGroup: clusterv1.GroupVersionBootstrap.Group, + Name: "bootstrap", + } + + tests := []struct { + name string + kcp *controlplanev1.KubeadmControlPlane + isUpdatingExistingMachine bool + want []gomegatypes.GomegaMatcher + wantErr bool + }{ + { + name: "should return the correct Machine object when creating a new Machine", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + 
Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: false, + want: []gomegatypes.GomegaMatcher{ + HavePrefix(kcpName + namingTemplateKey), + Not(HaveSuffix("00000")), + }, + wantErr: false, + }, + { + name: "should return error when creating a new Machine when '.random' is not added in template", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" 
+ namingTemplateKey, + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: true, + }, + { + name: "should not return error when creating a new Machine when the generated name exceeds 63", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .random }}" + fmt.Sprintf("%059d", 0), + }, + }, + }, + isUpdatingExistingMachine: false, + want: []gomegatypes.GomegaMatcher{ + ContainSubstring(fmt.Sprintf("%053d", 0)), + Not(HaveSuffix("00000")), + }, + wantErr: false, + }, + { + name: "should return error when creating a new Machine with invalid template", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: 
bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "some-hardcoded-name-{{ .doesnotexistindata }}-{{ .random }}", // invalid template + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: true, + }, + { + name: "should return the correct Machine object when creating a new Machine with default templated name", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: false, + want: []gomegatypes.GomegaMatcher{ + HavePrefix(kcpName), + Not(HaveSuffix("00000")), + }, + }, + { + name: "should return the correct Machine object when creating a new Machine with additional kcp readinessGates", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Bar", + }, + }, + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: 
duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: false, + }, + { + name: "should return the correct Machine object when updating an existing Machine (empty ClusterConfiguration annotation)", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: true, + wantErr: false, + }, + { + name: "should return the correct Machine object when updating an existing Machine (outdated ClusterConfiguration annotation)", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: 
controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: true, + wantErr: false, + }, + { + name: "should return the correct Machine object when updating an existing Machine (up to date ClusterConfiguration annotation)", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: true, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var 
desiredMachine *clusterv1.Machine + failureDomain := "fd-1" + var expectedMachineSpec clusterv1.MachineSpec + var err error + + if tt.isUpdatingExistingMachine { + machineName := "existing-machine" + machineUID := types.UID("abc-123-existing-machine") + // Use different ClusterConfiguration string than the information present in KCP + // to verify that for an existing machine we do not override this information. + remediationData := "remediation-data" + machineVersion := "v1.25.3" + existingMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName, + UID: machineUID, + Annotations: map[string]string{ + controlplanev1.RemediationForAnnotation: remediationData, + }, + }, + Spec: clusterv1.MachineSpec{ + Version: machineVersion, + FailureDomain: failureDomain, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: duration10s, + NodeDeletionTimeoutSeconds: duration10s, + NodeVolumeDetachTimeoutSeconds: duration10s, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: bootstrapRef, + }, + InfrastructureRef: *infraRef, + ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "Foo"}}, + }, + } + + desiredMachine, err = ComputeDesiredMachine( + tt.kcp, cluster, + existingMachine.Spec.FailureDomain, existingMachine, + ) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + expectedMachineSpec = clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: machineVersion, // Should use the Machine version and not the version from KCP. 
+ Bootstrap: clusterv1.Bootstrap{ + ConfigRef: bootstrapRef, + }, + InfrastructureRef: *infraRef, + FailureDomain: failureDomain, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, + NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, + NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, + }, + ReadinessGates: append(append(MandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), + } + + // Verify the Name and UID of the Machine remain unchanged + g.Expect(desiredMachine.Name).To(Equal(machineName)) + g.Expect(desiredMachine.UID).To(Equal(machineUID)) + // Verify annotations. + expectedAnnotations := map[string]string{} + for k, v := range kcpMachineTemplateObjectMeta.Annotations { + expectedAnnotations[k] = v + } + expectedAnnotations[controlplanev1.RemediationForAnnotation] = remediationData + // The pre-terminate annotation should always be added + expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" + g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) + } else { + desiredMachine, err = ComputeDesiredMachine( + tt.kcp, cluster, + failureDomain, nil, + ) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + expectedMachineSpec = clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: tt.kcp.Spec.Version, + FailureDomain: failureDomain, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, + NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, + NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, + }, + ReadinessGates: 
append(append(MandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), + } + // Verify Name. + for _, matcher := range tt.want { + g.Expect(desiredMachine.Name).To(matcher) + } + // Verify annotations. + expectedAnnotations := map[string]string{} + for k, v := range kcpMachineTemplateObjectMeta.Annotations { + expectedAnnotations[k] = v + } + // The pre-terminate annotation should always be added + expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" + g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) + } + + g.Expect(desiredMachine.Namespace).To(Equal(tt.kcp.Namespace)) + g.Expect(desiredMachine.OwnerReferences).To(HaveLen(1)) + g.Expect(desiredMachine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(tt.kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))) + g.Expect(desiredMachine.Spec).To(BeComparableTo(expectedMachineSpec)) + + // Verify that the machineTemplate.ObjectMeta has been propagated to the Machine. + // Verify labels. + expectedLabels := map[string]string{} + for k, v := range kcpMachineTemplateObjectMeta.Labels { + expectedLabels[k] = v + } + expectedLabels[clusterv1.ClusterNameLabel] = cluster.Name + expectedLabels[clusterv1.MachineControlPlaneLabel] = "" + expectedLabels[clusterv1.MachineControlPlaneNameLabel] = tt.kcp.Name + g.Expect(desiredMachine.Labels).To(Equal(expectedLabels)) + + // Verify that machineTemplate.ObjectMeta in KCP has not been modified. 
+ g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Labels).To(Equal(kcpMachineTemplateObjectMetaCopy.Labels)) + g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Annotations).To(Equal(kcpMachineTemplateObjectMetaCopy.Annotations)) + }) + } +} + +func Test_ComputeKubeadmConfig(t *testing.T) { + g := NewWithT(t) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + }, + } + kcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kcp-foo", + Namespace: cluster.Namespace, + UID: "abc-123-kcp-control-plane", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("4"), + }, + }, + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), + }, + }, + }, + }, + }, + Version: "v1.31.0", + }, + } + expectedKubeadmConfigWithoutOwner := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-1", + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + Annotations: map[string]string{}, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + }, + }, + // InitConfiguration and JoinConfiguration is added below. 
+ }, + } + preExistingKubeadmConfigOwnedByMachine := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-1", + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: "machine-1", + }}, + }, + } + + for _, isJoin := range []bool{true, false} { + expectedKubeadmConfigWithoutOwner := expectedKubeadmConfigWithoutOwner.DeepCopy() + if isJoin { + expectedKubeadmConfigWithoutOwner.Spec.InitConfiguration = bootstrapv1.InitConfiguration{} + expectedKubeadmConfigWithoutOwner.Spec.JoinConfiguration = kcp.Spec.KubeadmConfigSpec.JoinConfiguration + } else { + expectedKubeadmConfigWithoutOwner.Spec.InitConfiguration = kcp.Spec.KubeadmConfigSpec.InitConfiguration + expectedKubeadmConfigWithoutOwner.Spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} + } + + kubeadmConfig, err := ComputeKubeadmConfig(kcp, cluster, isJoin, "machine-1", nil) + g.Expect(err).ToNot(HaveOccurred()) + expectedKubeadmConfig := expectedKubeadmConfigWithoutOwner.DeepCopy() + // New KubeadmConfig should have KCP ownerReference. + expectedKubeadmConfig.SetOwnerReferences([]metav1.OwnerReference{{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + }}) + g.Expect(kubeadmConfig).To(BeComparableTo(expectedKubeadmConfig)) + + kubeadmConfig, err = ComputeKubeadmConfig(kcp, cluster, isJoin, "machine-1", preExistingKubeadmConfigOwnedByMachine) + g.Expect(err).ToNot(HaveOccurred()) + // If there is a pre-existing KubeadmConfig that is owned by a Machine, the computed KubeadmConfig + // should have no ownerReferences, so we don't overwrite the ownerReference set by the Machine controller. 
+ g.Expect(kubeadmConfig).To(BeComparableTo(expectedKubeadmConfigWithoutOwner)) + } +} + +func Test_ComputeInfraMachine(t *testing.T) { + g := NewWithT(t) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + }, + } + infrastructureMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineTemplateKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "infra-machine-template-1", + "namespace": cluster.Namespace, + }, + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + }, + }, + } + kcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kcp-foo", + Namespace: cluster.Namespace, + UID: "abc-123-kcp-control-plane", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: infrastructureMachineTemplate.GetKind(), + APIGroup: infrastructureMachineTemplate.GroupVersionKind().Group, + Name: infrastructureMachineTemplate.GetName(), + }, + }, + }, + }, + } + expectedInfraMachineWithoutOwner := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "machine-1", + "namespace": cluster.Namespace, + "labels": map[string]interface{}{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + 
clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + preExistingInfraMachineOwnedByMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "machine-1", + "namespace": cluster.Namespace, + "labels": map[string]interface{}{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + "ownerReferences": []interface{}{ + map[string]interface{}{ + "apiVersion": clusterv1.GroupVersion.String(), + "kind": "Machine", + "name": "machine-1", + }, + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infrastructureMachineTemplate.DeepCopy(), builder.GenericInfrastructureMachineTemplateCRD).Build() + + infraMachine, err := ComputeInfraMachine(t.Context(), fakeClient, kcp, cluster, "machine-1", nil) + g.Expect(err).ToNot(HaveOccurred()) + expectedInfraMachine := expectedInfraMachineWithoutOwner.DeepCopy() + // New InfraMachine should have KCP ownerReference. 
+ expectedInfraMachine.SetOwnerReferences([]metav1.OwnerReference{{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + }}) + g.Expect(infraMachine).To(BeComparableTo(expectedInfraMachine)) + + infraMachine, err = ComputeInfraMachine(t.Context(), fakeClient, kcp, cluster, "machine-1", preExistingInfraMachineOwnedByMachine) + g.Expect(err).ToNot(HaveOccurred()) + // If there is a pre-existing InfraMachine that is owned by a Machine, the computed InfraMachine + // should have no ownerReferences, so we don't overwrite the ownerReference set by the Machine controller. + g.Expect(infraMachine).To(BeComparableTo(expectedInfraMachineWithoutOwner)) +} + +func TestDefaultFeatureGates(t *testing.T) { + tests := []struct { + name string + kubernetesVersion semver.Version + kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec + wantKubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec + }{ + { + name: "don't default ControlPlaneKubeletLocalMode for 1.30", + kubernetesVersion: semver.MustParse("1.30.99"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + "EtcdLearnerMode": true, + }, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + "EtcdLearnerMode": true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + 
kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: nil, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{}, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + "EtcdLearnerMode": true, + }, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + "EtcdLearnerMode": true, + }, + }, + }, + }, + { + name: "don't default ControlPlaneKubeletLocalMode for 1.31 if already set to false", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: false, + }, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: false, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + 
DefaultFeatureGates(tt.kubeadmConfigSpec, tt.kubernetesVersion) + g.Expect(tt.wantKubeadmConfigSpec).Should(BeComparableTo(tt.kubeadmConfigSpec)) + }) + } +} diff --git a/controlplane/kubeadm/internal/filters.go b/controlplane/kubeadm/internal/filters.go index b9d29d2054cf..e86820d0b569 100644 --- a/controlplane/kubeadm/internal/filters.go +++ b/controlplane/kubeadm/internal/filters.go @@ -17,37 +17,48 @@ limitations under the License. package internal import ( + "context" "fmt" "reflect" - "github.com/blang/semver/v4" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/defaulting" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/internal/util/compare" "sigs.k8s.io/cluster-api/util/collections" ) // NotUpToDateResult is the result of calling the UpToDate func for a Machine. type NotUpToDateResult struct { - LogMessages []string - ConditionMessages []string + LogMessages []string + ConditionMessages []string + EligibleForInPlaceUpdate bool + DesiredMachine *clusterv1.Machine + CurrentInfraMachine *unstructured.Unstructured + DesiredInfraMachine *unstructured.Unstructured + CurrentKubeadmConfig *bootstrapv1.KubeadmConfig + DesiredKubeadmConfig *bootstrapv1.KubeadmConfig } // UpToDate checks if a Machine is up to date with the control plane's configuration. // If not, messages explaining why are provided with different level of detail for logs and conditions. 
-func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, reconciliationTime *metav1.Time, infraMachines map[string]*unstructured.Unstructured, kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig) (bool, *NotUpToDateResult, error) { - res := &NotUpToDateResult{} +func UpToDate(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, reconciliationTime *metav1.Time, infraMachines map[string]*unstructured.Unstructured, kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig) (bool, *NotUpToDateResult, error) { + res := &NotUpToDateResult{ + EligibleForInPlaceUpdate: true, + } // Machines whose certificates are about to expire. if collections.ShouldRolloutBefore(reconciliationTime, kcp.Spec.Rollout.Before)(machine) { res.LogMessages = append(res.LogMessages, "certificates will expire soon, rolloutBefore expired") res.ConditionMessages = append(res.ConditionMessages, "Certificates will expire soon") + res.EligibleForInPlaceUpdate = false } // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, @@ -55,10 +66,12 @@ func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlan if collections.ShouldRolloutAfter(reconciliationTime, kcp.Spec.Rollout.After)(machine) { res.LogMessages = append(res.LogMessages, "rolloutAfter expired") res.ConditionMessages = append(res.ConditionMessages, "KubeadmControlPlane spec.rolloutAfter expired") + res.EligibleForInPlaceUpdate = false } // Machines that do not match with KCP config. - matches, specLogMessages, specConditionMessages, err := matchesMachineSpec(infraMachines, kubeadmConfigs, kcp, machine) + // Note: matchesMachineSpec will update res if necessary. 
+ matches, specLogMessages, specConditionMessages, err := matchesMachineSpec(ctx, c, infraMachines, kubeadmConfigs, kcp, cluster, machine, res) if err != nil { return false, nil, errors.Wrapf(err, "failed to determine if Machine %s is up-to-date", machine.Name) } @@ -71,7 +84,9 @@ func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlan return false, res, nil } - return true, nil, nil + // If everything matches no need for an update. + res.EligibleForInPlaceUpdate = false + return true, res, nil } // matchesMachineSpec checks if a Machine matches any of a set of KubeadmConfigs and a set of infra machine configs. @@ -82,7 +97,7 @@ func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlan // - mutated in-place (ex: NodeDrainTimeoutSeconds) // - are not dictated by KCP (ex: ProviderID) // - are not relevant for the rollout decision (ex: failureDomain). -func matchesMachineSpec(infraMachines map[string]*unstructured.Unstructured, kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (bool, []string, []string, error) { +func matchesMachineSpec(ctx context.Context, c client.Client, infraMachines map[string]*unstructured.Unstructured, kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, machine *clusterv1.Machine, res *NotUpToDateResult) (bool, []string, []string, error) { logMessages := []string{} conditionMessages := []string{} @@ -92,17 +107,35 @@ func matchesMachineSpec(infraMachines map[string]*unstructured.Unstructured, kub // Note: the code computing the message for KCP's RolloutOut condition is making assumptions on the format/content of this message. 
conditionMessages = append(conditionMessages, fmt.Sprintf("Version %s, %s required", machineVersion, kcp.Spec.Version)) } + desiredMachine, err := desiredstate.ComputeDesiredMachine(kcp, cluster, machine.Spec.FailureDomain, machine) + if err != nil { + return false, nil, nil, errors.Wrapf(err, "failed to match Machine") + } + // Note: spec.version is not mutated in-place by syncMachines and accordingly + // not updated by desiredstate.ComputeDesiredMachine, so we have to update it here. + desiredMachine.Spec.Version = kcp.Spec.Version + // Note: spec.failureDomain is in general only changed on delete/create, so we don't have to update it here for in-place. + res.DesiredMachine = desiredMachine + // Note: Intentionally not storing currentMachine as it can change later, e.g. through syncMachines. - reason, matches, err := matchesKubeadmConfig(kubeadmConfigs, kcp, machine) + reason, currentKubeadmConfig, desiredKubeadmConfig, matches, err := matchesKubeadmConfig(kubeadmConfigs, kcp, cluster, machine) if err != nil { - return false, nil, nil, errors.Wrapf(err, "failed to match Machine spec") + return false, nil, nil, errors.Wrapf(err, "failed to match Machine") } + res.CurrentKubeadmConfig = currentKubeadmConfig + res.DesiredKubeadmConfig = desiredKubeadmConfig if !matches { logMessages = append(logMessages, reason) conditionMessages = append(conditionMessages, "KubeadmConfig is not up-to-date") } - if reason, matches := matchesInfraMachine(infraMachines, kcp, machine); !matches { + reason, currentInfraMachine, desiredInfraMachine, matches, err := matchesInfraMachine(ctx, c, infraMachines, kcp, cluster, machine) + if err != nil { + return false, nil, nil, errors.Wrapf(err, "failed to match Machine") + } + res.CurrentInfraMachine = currentInfraMachine + res.DesiredInfraMachine = desiredInfraMachine + if !matches { logMessages = append(logMessages, reason) conditionMessages = append(conditionMessages, fmt.Sprintf("%s is not up-to-date", 
machine.Spec.InfrastructureRef.Kind)) } @@ -118,11 +151,11 @@ func matchesMachineSpec(infraMachines map[string]*unstructured.Unstructured, kub // matches a given KCP infra template and if it doesn't match returns the reason why. // Note: Differences to the labels and annotations on the infrastructure machine are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to the infrastructure machines. -func matchesInfraMachine(infraMachines map[string]*unstructured.Unstructured, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool) { +func matchesInfraMachine(ctx context.Context, c client.Client, infraMachines map[string]*unstructured.Unstructured, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, *unstructured.Unstructured, *unstructured.Unstructured, bool, error) { currentInfraMachine, found := infraMachines[machine.Name] if !found { // Return true here because failing to get infrastructure machine should not be considered as unmatching. - return "", true + return "", nil, nil, true, nil } clonedFromName, ok1 := currentInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] @@ -131,7 +164,12 @@ func matchesInfraMachine(infraMachines map[string]*unstructured.Unstructured, kc // All kcp cloned infra machines should have this annotation. // Missing the annotation may be due to older version machines or adopted machines. // Should not be considered as mismatch. - return "", true + return "", nil, nil, true, nil + } + + desiredInfraMachine, err := desiredstate.ComputeInfraMachine(ctx, c, kcp, cluster, machine.Name, currentInfraMachine) + if err != nil { + return "", nil, nil, false, errors.Wrapf(err, "failed to match InfraMachine") } // Check if the machine's infrastructure reference has been created from the current KCP infrastructure template. 
@@ -139,69 +177,101 @@ func matchesInfraMachine(infraMachines map[string]*unstructured.Unstructured, kc clonedFromGroupKind != kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind().String() { return fmt.Sprintf("Infrastructure template on KCP rotated from %s %s to %s %s", clonedFromGroupKind, clonedFromName, - kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind().String(), kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name), false + kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind().String(), kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name), currentInfraMachine, desiredInfraMachine, false, nil } - return "", true + return "", currentInfraMachine, desiredInfraMachine, true, nil } // matchesKubeadmConfig checks if machine's KubeadmConfigSpec is equivalent with KCP's KubeadmConfigSpec. // Note: Differences to the labels and annotations on the KubeadmConfig are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to KubeadmConfig. -func matchesKubeadmConfig(kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool, error) { +func matchesKubeadmConfig(kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, *bootstrapv1.KubeadmConfig, *bootstrapv1.KubeadmConfig, bool, error) { bootstrapRef := machine.Spec.Bootstrap.ConfigRef if !bootstrapRef.IsDefined() { // Missing bootstrap reference should not be considered as unmatching. // This is a safety precaution to avoid selecting machines that are broken, which in the future should be remediated separately. - return "", true, nil + return "", nil, nil, true, nil } currentKubeadmConfig, found := kubeadmConfigs[machine.Name] if !found { // Return true here because failing to get KubeadmConfig should not be considered as unmatching. 
// This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. - return "", true, nil + return "", nil, nil, true, nil } - // takes the KubeadmConfigSpec from KCP and applies the transformations required - // to allow a comparison with the KubeadmConfig referenced from the machine. - desiredKubeadmConfig := getAdjustedKcpConfig(kcp, currentKubeadmConfig) - - desiredKubeadmConfigForDiff, currentKubeadmConfigForDiff, err := PrepareKubeadmConfigsForDiff(desiredKubeadmConfig, currentKubeadmConfig, kcp) + // Note: We compute a KubeadmConfig with joinConfiguration. + // If necessary, PrepareKubeadmConfigsForDiff below will convert initConfiguration to joinConfiguration + // in currentKubeadmConfig. So that we compare two KubeadmConfigs that both have joinConfigurations. + desiredKubeadmConfigWithJoin, err := desiredstate.ComputeKubeadmConfig(kcp, cluster, true, machine.Name, currentKubeadmConfig) if err != nil { - return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") } + desiredKubeadmConfigWithJoinForDiff, currentKubeadmConfigWithJoinForDiff := PrepareKubeadmConfigsForDiff(desiredKubeadmConfigWithJoin, currentKubeadmConfig, true) // Check if current and desired KubeadmConfigs match. - // NOTE: only one between init configuration and join configuration is set on a machine, depending - // on the fact that the machine was the initial control plane node or a joining control plane node. - match, diff, err := compare.Diff(¤tKubeadmConfigForDiff.Spec, &desiredKubeadmConfigForDiff.Spec) + // Note: desiredKubeadmConfigWithJoinForDiff has been computed for a kubeadm join. + // Note: currentKubeadmConfigWithJoinForDiff has been migrated from init to join, if currentKubeadmConfig was for a kubeadm init. 
+ match, diff, err := compare.Diff(¤tKubeadmConfigWithJoinForDiff.Spec, &desiredKubeadmConfigWithJoinForDiff.Spec) if err != nil { - return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") } if !match { - return fmt.Sprintf("Machine KubeadmConfig is outdated: diff: %s", diff), false, nil + // Note: KCP initConfiguration and joinConfiguration should be configured identically. + // But if they are not configured identically and the currentKubeadmConfig is for init we still + // want to avoid unnecessary rollouts. + // Accordingly, we also have to compare the currentKubeadmConfig against a desiredKubeadmConfig + // computed for init to avoid unnecessary rollouts. + // Note: In any case we are going to use desiredKubeadmConfigWithJoin for in-place updates and return it accordingly. + if currentKubeadmConfig.IsForInit() { + desiredKubeadmConfigWithInit, err := desiredstate.ComputeKubeadmConfig(kcp, cluster, false, machine.Name, currentKubeadmConfig) + if err != nil { + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") + } + desiredKubeadmConfigWithInitForDiff, currentKubeadmConfigWithInitForDiff := PrepareKubeadmConfigsForDiff(desiredKubeadmConfigWithInit, currentKubeadmConfig, false) + + // Check if current and desired KubeadmConfigs match. + // Note: desiredKubeadmConfigWithInitForDiff has been computed for a kubeadm init. + // Note: currentKubeadmConfigWithInitForDiff is for a kubeadm init. 
+ if match, _, err := compare.Diff(¤tKubeadmConfigWithInitForDiff.Spec, &desiredKubeadmConfigWithInitForDiff.Spec); err != nil { + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") + } else if match { + return "", currentKubeadmConfig, desiredKubeadmConfigWithJoin, true, nil + } + } + + return fmt.Sprintf("Machine KubeadmConfig is outdated: diff: %s", diff), currentKubeadmConfig, desiredKubeadmConfigWithJoin, false, nil } - return "", true, nil + return "", currentKubeadmConfig, desiredKubeadmConfigWithJoin, true, nil } // PrepareKubeadmConfigsForDiff cleans up all fields that are not relevant for the comparison. -func PrepareKubeadmConfigsForDiff(desiredKubeadmConfig, currentKubeadmConfig *bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane) (desired *bootstrapv1.KubeadmConfig, current *bootstrapv1.KubeadmConfig, _ error) { +func PrepareKubeadmConfigsForDiff(desiredKubeadmConfig, currentKubeadmConfig *bootstrapv1.KubeadmConfig, convertCurrentInitConfigurationToJoinConfiguration bool) (desired *bootstrapv1.KubeadmConfig, current *bootstrapv1.KubeadmConfig) { // DeepCopy to ensure the passed in KubeadmConfigs are not modified. // This has to be done because we eventually want to be able to apply the desiredKubeadmConfig // (without the modifications that we make here). desiredKubeadmConfig = desiredKubeadmConfig.DeepCopy() currentKubeadmConfig = currentKubeadmConfig.DeepCopy() - // Default feature gates like in initializeControlPlane / scaleUpControlPlane. - // Note: Changes in feature gates can still trigger rollouts. - // TODO(in-place) refactor this area so the desired KubeadmConfig is not computed in multiple places independently. 
- parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse Kubernetes version %q", kcp.Spec.Version) + if convertCurrentInitConfigurationToJoinConfiguration && currentKubeadmConfig.IsForInit() { + // Convert InitConfiguration to JoinConfiguration + currentKubeadmConfig.Spec.JoinConfiguration.Timeouts = currentKubeadmConfig.Spec.InitConfiguration.Timeouts + currentKubeadmConfig.Spec.JoinConfiguration.Patches = currentKubeadmConfig.Spec.InitConfiguration.Patches + currentKubeadmConfig.Spec.JoinConfiguration.SkipPhases = currentKubeadmConfig.Spec.InitConfiguration.SkipPhases + currentKubeadmConfig.Spec.JoinConfiguration.NodeRegistration = currentKubeadmConfig.Spec.InitConfiguration.NodeRegistration + if currentKubeadmConfig.Spec.InitConfiguration.LocalAPIEndpoint.IsDefined() { + currentKubeadmConfig.Spec.JoinConfiguration.ControlPlane = &bootstrapv1.JoinControlPlane{ + LocalAPIEndpoint: currentKubeadmConfig.Spec.InitConfiguration.LocalAPIEndpoint, + } + } + currentKubeadmConfig.Spec.InitConfiguration = bootstrapv1.InitConfiguration{} + + // CACertPath can only be configured for join. + // The CACertPath should never trigger a rollout of Machines created via kubeadm init. + currentKubeadmConfig.Spec.JoinConfiguration.CACertPath = desiredKubeadmConfig.Spec.JoinConfiguration.CACertPath } - DefaultFeatureGates(&desiredKubeadmConfig.Spec, parsedVersion) // Ignore ControlPlaneEndpoint which is added on the Machine KubeadmConfig by CABPK. // Note: ControlPlaneEndpoint should also never change for a Cluster, so no reason to trigger a rollout because of that. 
@@ -237,27 +307,7 @@ func PrepareKubeadmConfigsForDiff(desiredKubeadmConfig, currentKubeadmConfig *bo dropOmittableFields(&desiredKubeadmConfig.Spec) dropOmittableFields(&currentKubeadmConfig.Spec) - return desiredKubeadmConfig, currentKubeadmConfig, nil -} - -// getAdjustedKcpConfig takes the KubeadmConfigSpec from KCP and applies the transformations required -// to allow a comparison with the KubeadmConfig referenced from the machine. -// NOTE: The KCP controller applies a set of transformations when creating a KubeadmConfig referenced from the machine; -// those transformations are implemented in ControlPlane.InitialControlPlaneConfig() and ControlPlane.JoinControlPlaneConfig(). -func getAdjustedKcpConfig(kcp *controlplanev1.KubeadmControlPlane, machineConfig *bootstrapv1.KubeadmConfig) *bootstrapv1.KubeadmConfig { - kcpConfig := kcp.Spec.KubeadmConfigSpec.DeepCopy() - - // if Machine's JoinConfiguration is set, this is a joining control plane machine, so empty out the InitConfiguration; - // otherwise empty out the JoinConfiguration. - // Note: a KubeadmConfig for a joining control plane must have at least joinConfiguration.controlPlane and joinConfiguration.discovery to be set for joining; - // if those fields are missing in the KCP config, CABPK sets them. - if machineConfig.Spec.JoinConfiguration.IsDefined() { - kcpConfig.InitConfiguration = bootstrapv1.InitConfiguration{} - } else { - kcpConfig.JoinConfiguration = bootstrapv1.JoinConfiguration{} - } - - return &bootstrapv1.KubeadmConfig{Spec: *kcpConfig} + return desiredKubeadmConfig, currentKubeadmConfig } // dropOmittableFields makes the comparison tolerant to omittable fields being set in the go struct. 
It applies to: diff --git a/controlplane/kubeadm/internal/filters_test.go b/controlplane/kubeadm/internal/filters_test.go index 3522a0c2322b..dfe93950c137 100644 --- a/controlplane/kubeadm/internal/filters_test.go +++ b/controlplane/kubeadm/internal/filters_test.go @@ -24,82 +24,22 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstream" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" + "sigs.k8s.io/cluster-api/util/test/builder" ) -func TestGetAdjustedKcpConfig(t *testing.T) { - t.Run("if the machine is the first control plane, kcp config should get InitConfiguration", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - JoinConfiguration: bootstrapv1.JoinConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - }, - }, - } - machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ // first control-plane - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - }, - } - kcpConfig := getAdjustedKcpConfig(kcp, machineConfig) - 
g.Expect(kcpConfig.Spec.InitConfiguration.IsDefined()).To(BeTrue()) - g.Expect(kcpConfig.Spec.JoinConfiguration.IsDefined()).To(BeFalse()) - }) - t.Run("if the machine is a joining control plane, kcp config should get JoinConfiguration", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - JoinConfiguration: bootstrapv1.JoinConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - }, - }, - } - machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ // joining control-plane - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - }, - } - kcpConfig := getAdjustedKcpConfig(kcp, machineConfig) - g.Expect(kcpConfig.Spec.InitConfiguration.IsDefined()).To(BeFalse()) - g.Expect(kcpConfig.Spec.JoinConfiguration.IsDefined()).To(BeTrue()) - }) -} - func TestMatchesKubeadmConfig(t *testing.T) { t.Run("returns true if Machine configRef is not defined", func(t *testing.T) { g := NewWithT(t) @@ -113,7 +53,7 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmConfig(map[string]*bootstrapv1.KubeadmConfig{}, nil, m) + reason, _, _, match, err := matchesKubeadmConfig(map[string]*bootstrapv1.KubeadmConfig{}, nil, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -134,7 +74,7 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmConfig(map[string]*bootstrapv1.KubeadmConfig{}, nil, m) + reason, _, _, match, err := matchesKubeadmConfig(map[string]*bootstrapv1.KubeadmConfig{}, nil, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) 
g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -179,8 +119,10 @@ func TestMatchesKubeadmConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: machineConfig, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -220,8 +162,10 @@ func TestMatchesKubeadmConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: machineConfig, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -257,7 +201,7 @@ func TestMatchesKubeadmConfig(t *testing.T) { Spec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, + desiredstate.ControlPlaneKubeletLocalMode: true, }, }, }, @@ -265,8 +209,10 @@ func TestMatchesKubeadmConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: machineConfig, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ 
-317,8 +263,10 @@ func TestMatchesKubeadmConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: machineConfig, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -362,8 +310,10 @@ func TestMatchesKubeadmConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: machineConfig, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: v1beta2.ClusterConfiguration{ @@ -387,8 +337,32 @@ func TestMatchesKubeadmConfig(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + Timeouts: bootstrapv1.Timeouts{ + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](5), + KubernetesAPICallSeconds: ptr.To[int32](7), + }, + Patches: bootstrapv1.Patches{ + Directory: "/test/patches", + }, + SkipPhases: []string{"skip-phase"}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), 
+ }, + }, + }, + ControlPlane: &bootstrapv1.JoinControlPlane{ + LocalAPIEndpoint: bootstrapv1.APIEndpoint{ + AdvertiseAddress: "1.2.3.4", + BindPort: 6443, + }, + }, + CACertPath: "/tmp/cacert", // This field doesn't exist in InitConfiguration, so it should not lead to a rollout. + }, }, Version: "v1.30.0", }, @@ -415,12 +389,38 @@ func TestMatchesKubeadmConfig(t *testing.T) { Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, + // InitConfiguration will be converted to JoinConfiguration and then compared against the JoinConfiguration from KCP. + InitConfiguration: bootstrapv1.InitConfiguration{ + Timeouts: bootstrapv1.Timeouts{ + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](5), + KubernetesAPICallSeconds: ptr.To[int32](7), + }, + Patches: bootstrapv1.Patches{ + Directory: "/test/patches", + }, + SkipPhases: []string{"skip-phase"}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), + }, + }, + }, + LocalAPIEndpoint: bootstrapv1.APIEndpoint{ + AdvertiseAddress: "1.2.3.4", + BindPort: 6443, + }, + }, }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -435,7 +435,11 @@ func TestMatchesKubeadmConfig(t *testing.T) { Name: "A new name", // This is a change }, }, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", // This is a change + }, + }, }, Version: "v1.30.0", }, 
@@ -470,13 +474,16 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, - InitConfiguration: v1beta2.InitConfiguration{ - BootstrapTokens: nil, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: v1beta2.JoinConfiguration{ NodeRegistration: v1beta2.NodeRegistrationOptions{ - Name: "An old name", + Name: "A new name", @@ -484,13 +491,13 @@ func TestMatchesKubeadmConfig(t *testing.T) { Taints: nil, ... // 4 identical fields }, - LocalAPIEndpoint: {}, - SkipPhases: nil, - ... // 2 identical fields + CACertPath: "", + Discovery: {}, + ... // 4 identical fields }, - JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, - Files: nil, - ... // 10 identical fields + Files: nil, + DiskSetup: {}, + ... 
// 9 identical fields }`)) }) t.Run("returns true if JoinConfiguration is equal", func(t *testing.T) { @@ -500,7 +507,11 @@ func TestMatchesKubeadmConfig(t *testing.T) { KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + }, }, Version: "v1.30.0", }, @@ -527,12 +538,19 @@ func TestMatchesKubeadmConfig(t *testing.T) { Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + }, }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -544,7 +562,10 @@ func TestMatchesKubeadmConfig(t *testing.T) { ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, JoinConfiguration: bootstrapv1.JoinConfiguration{ - // Gets removed because Discovery is not relevant for the rollout decision. + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + // Discovery gets removed because Discovery is not relevant for the rollout decision. 
Discovery: bootstrapv1.Discovery{TLSBootstrapToken: "aaa"}, }, }, @@ -574,14 +595,20 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, Spec: bootstrapv1.KubeadmConfigSpec{ JoinConfiguration: bootstrapv1.JoinConfiguration{ - // Gets removed because Discovery is not relevant for the rollout decision. + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + // Discovery gets removed because Discovery is not relevant for the rollout decision. Discovery: bootstrapv1.Discovery{TLSBootstrapToken: "bbb"}, }, }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -593,6 +620,9 @@ func TestMatchesKubeadmConfig(t *testing.T) { ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, ControlPlane: nil, // Control plane configuration missing in KCP }, }, @@ -622,16 +652,156 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, Spec: bootstrapv1.KubeadmConfigSpec{ JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, ControlPlane: &bootstrapv1.JoinControlPlane{}, // Machine gets a default JoinConfiguration.ControlPlane from CABPK }, }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + 
g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) + }) + t.Run("returns true if JoinConfiguration is not equal, but InitConfiguration is", func(t *testing.T) { + g := NewWithT(t) + kcp := &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "Different name", + }, + }, + }, + Version: "v1.30.0", + }, + } + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmConfig", + Name: "test", + APIGroup: bootstrapv1.GroupVersion.Group, + }, + }, + }, + } + machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ + m.Name: { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, + }, + }, + } + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) + t.Run("returns false if JoinConfiguration is not equal, and 
InitConfiguration is also not equal", func(t *testing.T) { + g := NewWithT(t) + kcp := &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "Different name", + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "Different name", + }, + }, + }, + Version: "v1.30.0", + }, + } + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmConfig", + Name: "test", + APIGroup: bootstrapv1.GroupVersion.Group, + }, + }, + }, + } + machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ + m.Name: { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, + }, + }, + } + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ + ClusterConfiguration: {}, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: v1beta2.JoinConfiguration{ + NodeRegistration: v1beta2.NodeRegistrationOptions{ +- Name: "name", ++ Name: "Different name", + CRISocket: 
"", + Taints: nil, + ... // 4 identical fields + }, + CACertPath: "", + Discovery: {}, + ... // 4 identical fields + }, + Files: nil, + DiskSetup: {}, + ... // 9 identical fields + }`)) + }) t.Run("returns false if JoinConfiguration has other differences in ControlPlane", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ @@ -640,6 +810,9 @@ func TestMatchesKubeadmConfig(t *testing.T) { ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, ControlPlane: nil, // Control plane configuration missing in KCP }, }, @@ -669,6 +842,9 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, Spec: bootstrapv1.KubeadmConfigSpec{ JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, ControlPlane: &bootstrapv1.JoinControlPlane{ LocalAPIEndpoint: bootstrapv1.APIEndpoint{ AdvertiseAddress: "1.2.3.4", @@ -679,14 +855,17 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(Equal(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, JoinConfiguration: v1beta2.JoinConfiguration{ - NodeRegistration: {ImagePullPolicy: "IfNotPresent"}, + NodeRegistration: {Name: "name", ImagePullPolicy: "IfNotPresent"}, CACertPath: "", Discovery: {}, - ControlPlane: 
&v1beta2.JoinControlPlane{ @@ -748,8 +927,11 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, @@ -813,8 +995,11 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + // Can't check if desiredKubeadmConfig is for join because the test case is that JoinConfiguration is not set anymore. 
g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, @@ -846,11 +1031,13 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, InitConfiguration: bootstrapv1.InitConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", KubeletExtraArgs: []bootstrapv1.Arg{}, }, }, JoinConfiguration: bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", KubeletExtraArgs: []bootstrapv1.Arg{}, }, }, @@ -883,12 +1070,19 @@ func TestMatchesKubeadmConfig(t *testing.T) { Spec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -898,6 +1092,11 @@ func TestMatchesKubeadmConfig(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ Format: bootstrapv1.CloudConfig, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, Version: "v1.30.0", }, @@ -925,11 +1124,19 @@ func TestMatchesKubeadmConfig(t *testing.T) { }, Spec: bootstrapv1.KubeadmConfigSpec{ Format: "", + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: 
bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -940,8 +1147,12 @@ func TestMatchesKubeadmConfig(t *testing.T) { KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, - Files: []bootstrapv1.File{{Path: "/tmp/foo"}}, // This is a change + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, + Files: []bootstrapv1.File{{Path: "/tmp/foo"}}, // This is a change }, Version: "v1.30.0", }, @@ -968,17 +1179,24 @@ func TestMatchesKubeadmConfig(t *testing.T) { Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, }, } - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig.IsForJoin()).To(BeTrue()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, 
InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, - JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: {NodeRegistration: {Name: "name", ImagePullPolicy: "IfNotPresent"}}, - Files: nil, + Files: []v1beta2.File{{Path: "/tmp/foo"}}, DiskSetup: {}, @@ -1038,7 +1256,7 @@ func TestMatchesKubeadmConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = nil - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, _, _, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1048,7 +1266,7 @@ func TestMatchesKubeadmConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations machineConfigs[m.Name].Labels = nil - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, _, _, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1058,7 +1276,7 @@ func TestMatchesKubeadmConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, _, _, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1068,7 +1286,7 @@ func TestMatchesKubeadmConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations - reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) + reason, _, _, 
match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1089,7 +1307,8 @@ func TestMatchesInfraMachine(t *testing.T) { }, }, } - reason, match := matchesInfraMachine(map[string]*unstructured.Unstructured{}, kcp, machine) + reason, _, _, match, err := matchesInfraMachine(t.Context(), nil, map[string]*unstructured.Unstructured{}, kcp, &clusterv1.Cluster{}, machine) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -1111,20 +1330,32 @@ func TestMatchesInfraMachine(t *testing.T) { }, Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - Kind: "GenericMachineTemplate", - Name: "infra-foo", - APIGroup: "generic.io", + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineTemplateKind, + Name: "infra-machine-template1", }, }, }, }, } + + infraMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineTemplateKind, + "metadata": map[string]interface{}{ + "name": "infra-machine-template1", + "namespace": "default", + }, + }, + } + m := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - Kind: "GenericMachine", - Name: "infra-foo", - APIGroup: "generic.io", + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineKind, + Name: "infra-config1", }, }, } @@ -1132,8 +1363,8 @@ func TestMatchesInfraMachine(t *testing.T) { infraConfigs := map[string]*unstructured.Unstructured{ m.Name: { Object: map[string]interface{}{ - "kind": "InfrastructureMachine", - "apiVersion": clusterv1.GroupVersionInfrastructure.String(), + "apiVersion": builder.InfrastructureGroupVersion.String(), 
+ "kind": builder.TestInfrastructureMachineKind, "metadata": map[string]interface{}{ "name": "infra-config1", "namespace": "default", @@ -1141,160 +1372,86 @@ func TestMatchesInfraMachine(t *testing.T) { }, }, } + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(builder.TestInfrastructureMachineTemplateCRD, infraMachineTemplate).Build() - t.Run("by returning true if neither labels or annotations match", func(t *testing.T) { + t.Run("by returning true if annotations don't exist", func(t *testing.T) { g := NewWithT(t) - infraConfigs[m.Name].SetAnnotations(map[string]string{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - }) - infraConfigs[m.Name].SetLabels(nil) - reason, match := matchesInfraMachine(infraConfigs, kcp, m) + infraConfigs[m.Name].SetAnnotations(map[string]string{}) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) - t.Run("by returning true if only labels don't match", func(t *testing.T) { + t.Run("by returning false if neither Name nor GroupKind matches", func(t *testing.T) { g := NewWithT(t) infraConfigs[m.Name].SetAnnotations(map[string]string{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - "test": "annotation", + clusterv1.TemplateClonedFromNameAnnotation: "different-infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", }) - infraConfigs[m.Name].SetLabels(nil) - reason, match := matchesInfraMachine(infraConfigs, kcp, m) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + reason, _, _, match, err 
:= matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal("Infrastructure template on KCP rotated from DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io different-infra-machine-template1 to TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1")) }) - t.Run("by returning true if only annotations don't match", func(t *testing.T) { + t.Run("by returning false if only GroupKind matches", func(t *testing.T) { g := NewWithT(t) infraConfigs[m.Name].SetAnnotations(map[string]string{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", + clusterv1.TemplateClonedFromNameAnnotation: "different-infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", }) - infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - reason, match := matchesInfraMachine(infraConfigs, kcp, m) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal("Infrastructure template on KCP rotated from TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io different-infra-machine-template1 to TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1")) }) - t.Run("by returning true if both labels and annotations match", func(t *testing.T) { + t.Run("by returning false if only Name matches", func(t *testing.T) { g := NewWithT(t) infraConfigs[m.Name].SetAnnotations(map[string]string{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - 
clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - "test": "annotation", + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", }) - infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - reason, match := matchesInfraMachine(infraConfigs, kcp, m) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal("Infrastructure template on KCP rotated from DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1 to TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1")) }) - }) -} - -func TestMatchesInfraMachine_WithClonedFromAnnotations(t *testing.T) { - kcp := &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - Kind: "GenericMachineTemplate", - Name: "infra-foo", - APIGroup: "generic.io", - }, - }, - }, - }, - } - machine := &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Kind: "InfrastructureMachine", - Name: "infra-config1", - }, - }, - } - tests := []struct { - name string - annotations map[string]interface{} - expectMatch bool - expectReason string - }{ - { - name: "returns true if annotations don't exist", - annotations: map[string]interface{}{}, - expectMatch: 
true, - }, - { - name: "returns false if annotations don't match anything", - annotations: map[string]interface{}{ - clusterv1.TemplateClonedFromNameAnnotation: "barfoo1", - clusterv1.TemplateClonedFromGroupKindAnnotation: "barfoo2", - }, - expectMatch: false, - expectReason: "Infrastructure template on KCP rotated from barfoo2 barfoo1 to GenericMachineTemplate.generic.io infra-foo", - }, - { - name: "returns false if TemplateClonedFromNameAnnotation matches but TemplateClonedFromGroupKindAnnotation doesn't", - annotations: map[string]interface{}{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "barfoo2", - }, - expectMatch: false, - expectReason: "Infrastructure template on KCP rotated from barfoo2 infra-foo to GenericMachineTemplate.generic.io infra-foo", - }, - { - name: "returns true if both annotations match", - annotations: map[string]interface{}{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - }, - expectMatch: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run("by returning true if both Name and GroupKind match", func(t *testing.T) { g := NewWithT(t) - infraConfigs := map[string]*unstructured.Unstructured{ - machine.Name: { - Object: map[string]interface{}{ - "kind": "InfrastructureMachine", - "apiVersion": clusterv1.GroupVersionInfrastructure.String(), - "metadata": map[string]interface{}{ - "name": "infra-config1", - "namespace": "default", - "annotations": tt.annotations, - }, - }, - }, - } - reason, match := matchesInfraMachine(infraConfigs, kcp, machine) - g.Expect(match).To(Equal(tt.expectMatch)) - g.Expect(reason).To(Equal(tt.expectReason)) + infraConfigs[m.Name].SetAnnotations(map[string]string{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: 
"TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(reason).To(BeEmpty()) + g.Expect(match).To(BeTrue()) }) - } + }) } func TestUpToDate(t *testing.T) { reconciliationTime := metav1.Now() defaultKcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "kcp", + }, Spec: controlplanev1.KubeadmControlPlaneSpec{ Replicas: nil, Version: "v1.30.0", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Kind: "AWSMachineTemplate", - Name: "template1", + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineTemplateKind, + Name: "infra-machine-template1", }, }, }, @@ -1311,6 +1468,28 @@ func TestUpToDate(t *testing.T) { }, }, } + + infraMachineTemplate1 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineTemplateKind, + "metadata": map[string]interface{}{ + "name": "infra-machine-template1", + "namespace": "default", + }, + }, + } + infraMachineTemplate2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineTemplateKind, + "metadata": map[string]interface{}{ + "name": "infra-machine-template2", + "namespace": "default", + }, + }, + } + defaultMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ CreationTimestamp: metav1.Time{Time: reconciliationTime.Add(-2 * 24 * time.Hour)}, // two days ago. 
@@ -1326,7 +1505,7 @@ func TestUpToDate(t *testing.T) { }, InfrastructureRef: clusterv1.ContractVersionedObjectReference{ APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Kind: "AWSMachine", + Kind: "TestInfrastructureMachine", Name: "infra-machine1", }, }, @@ -1338,14 +1517,14 @@ func TestUpToDate(t *testing.T) { defaultInfraConfigs := map[string]*unstructured.Unstructured{ defaultMachine.Name: { Object: map[string]interface{}{ - "kind": "AWSMachine", + "kind": builder.TestInfrastructureMachineKind, "apiVersion": clusterv1.GroupVersionInfrastructure.String(), "metadata": map[string]interface{}{ - "name": "infra-config1", + "name": "infra-machine1", "namespace": "default", "annotations": map[string]interface{}{ - "cluster.x-k8s.io/cloned-from-name": "template1", - "cluster.x-k8s.io/cloned-from-groupkind": "AWSMachineTemplate.infrastructure.cluster.x-k8s.io", + "cluster.x-k8s.io/cloned-from-name": "infra-machine-template1", + "cluster.x-k8s.io/cloned-from-groupkind": builder.InfrastructureGroupVersion.WithKind(builder.TestInfrastructureMachineTemplateKind).GroupKind().String(), }, }, }, @@ -1367,24 +1546,26 @@ func TestUpToDate(t *testing.T) { } tests := []struct { - name string - kcp *controlplanev1.KubeadmControlPlane - machine *clusterv1.Machine - infraConfigs map[string]*unstructured.Unstructured - machineConfigs map[string]*bootstrapv1.KubeadmConfig - expectUptoDate bool - expectLogMessages []string - expectConditionMessages []string + name string + kcp *controlplanev1.KubeadmControlPlane + machine *clusterv1.Machine + infraConfigs map[string]*unstructured.Unstructured + machineConfigs map[string]*bootstrapv1.KubeadmConfig + expectUptoDate bool + expectEligibleForInPlaceUpdate bool + expectLogMessages []string + expectConditionMessages []string }{ { - name: "machine up-to-date", - kcp: defaultKcp, - machine: defaultMachine, - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: true, - expectLogMessages: nil, - 
expectConditionMessages: nil, + name: "machine up-to-date", + kcp: defaultKcp, + machine: defaultMachine, + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: true, + expectEligibleForInPlaceUpdate: false, + expectLogMessages: nil, + expectConditionMessages: nil, }, { name: "certificate are expiring soon", @@ -1393,12 +1574,13 @@ func TestUpToDate(t *testing.T) { kcp.Spec.Rollout.Before.CertificatesExpiryDays = 150 // rollout if certificates will expire in less then 150 days. return kcp }(), - machine: defaultMachine, // certificates will expire in 100 days from now. - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"certificates will expire soon, rolloutBefore expired"}, - expectConditionMessages: []string{"Certificates will expire soon"}, + machine: defaultMachine, // certificates will expire in 100 days from now. + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: false, + expectLogMessages: []string{"certificates will expire soon, rolloutBefore expired"}, + expectConditionMessages: []string{"Certificates will expire soon"}, }, { name: "rollout after expired", @@ -1407,12 +1589,13 @@ func TestUpToDate(t *testing.T) { kcp.Spec.Rollout.After = metav1.Time{Time: reconciliationTime.Add(-1 * 24 * time.Hour)} // one day ago return kcp }(), - machine: defaultMachine, // created two days ago - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"rolloutAfter expired"}, - expectConditionMessages: []string{"KubeadmControlPlane spec.rolloutAfter expired"}, + machine: defaultMachine, // created two days ago + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: false, + expectLogMessages: 
[]string{"rolloutAfter expired"}, + expectConditionMessages: []string{"KubeadmControlPlane spec.rolloutAfter expired"}, }, { name: "kubernetes version does not match", @@ -1421,12 +1604,13 @@ func TestUpToDate(t *testing.T) { kcp.Spec.Version = "v1.30.2" return kcp }(), - machine: defaultMachine, // defaultMachine has "v1.31.0" - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"Machine version \"v1.30.0\" is not equal to KCP version \"v1.30.2\""}, - expectConditionMessages: []string{"Version v1.30.0, v1.30.2 required"}, + machine: defaultMachine, // defaultMachine has "v1.31.0" + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: true, + expectLogMessages: []string{"Machine version \"v1.30.0\" is not equal to KCP version \"v1.30.2\""}, + expectConditionMessages: []string{"Version v1.30.0, v1.30.2 required"}, }, { name: "KubeadmConfig is not up-to-date", @@ -1435,30 +1619,48 @@ func TestUpToDate(t *testing.T) { kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "bar" return kcp }(), - machine: defaultMachine, // was created with cluster name "foo" - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{\n ClusterConfiguration: v1beta2.ClusterConfiguration{\n ... // 4 identical fields\n Scheduler: {},\n DNS: {},\n- CertificatesDir: \"foo\",\n+ CertificatesDir: \"bar\",\n ImageRepository: \"\",\n FeatureGates: nil,\n ... // 2 identical fields\n },\n InitConfiguration: {NodeRegistration: {ImagePullPolicy: \"IfNotPresent\"}},\n JoinConfiguration: {NodeRegistration: {ImagePullPolicy: \"IfNotPresent\"}},\n ... 
// 11 identical fields\n }"}, + machine: defaultMachine, // was created with cluster name "foo" + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: true, + expectLogMessages: []string{`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ + ClusterConfiguration: v1beta2.ClusterConfiguration{ + ... // 4 identical fields + Scheduler: {}, + DNS: {}, +- CertificatesDir: "foo", ++ CertificatesDir: "bar", + ImageRepository: "", + FeatureGates: nil, + ... // 2 identical fields + }, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + ... // 11 identical fields + }`}, expectConditionMessages: []string{"KubeadmConfig is not up-to-date"}, }, { - name: "AWSMachine is not up-to-date", + name: "InfraMachine is not up-to-date", kcp: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKcp.DeepCopy() kcp.Spec.MachineTemplate.Spec.InfrastructureRef = clusterv1.ContractVersionedObjectReference{ - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Kind: "AWSMachineTemplate", - Name: "template2", - } // kcp moving to template 2 + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineTemplateKind, + Name: "infra-machine-template2", + } // kcp moving to infra-machine-template2 return kcp }(), - machine: defaultMachine, - infraConfigs: defaultInfraConfigs, // infra config cloned from template1 - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"Infrastructure template on KCP rotated from AWSMachineTemplate.infrastructure.cluster.x-k8s.io template1 to AWSMachineTemplate.infrastructure.cluster.x-k8s.io template2"}, - expectConditionMessages: []string{"AWSMachine is not up-to-date"}, + machine: defaultMachine, + infraConfigs: defaultInfraConfigs, // infra config cloned from infra-machine-template1 + 
machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: true, + expectLogMessages: []string{"Infrastructure template on KCP rotated from " + + "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1 to " + + "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template2"}, + expectConditionMessages: []string{"TestInfrastructureMachine is not up-to-date"}, }, } @@ -1466,11 +1668,24 @@ func TestUpToDate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - upToDate, res, err := UpToDate(tt.machine, tt.kcp, &reconciliationTime, tt.infraConfigs, tt.machineConfigs) + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(builder.TestInfrastructureMachineTemplateCRD, infraMachineTemplate1, infraMachineTemplate2).Build() + + upToDate, res, err := UpToDate(t.Context(), c, &clusterv1.Cluster{}, tt.machine, tt.kcp, &reconciliationTime, tt.infraConfigs, tt.machineConfigs) g.Expect(err).ToNot(HaveOccurred()) g.Expect(upToDate).To(Equal(tt.expectUptoDate)) + g.Expect(res).ToNot(BeNil()) + g.Expect(res.EligibleForInPlaceUpdate).To(Equal(tt.expectEligibleForInPlaceUpdate)) + g.Expect(res.DesiredMachine).ToNot(BeNil()) + g.Expect(res.DesiredMachine.Spec.Version).To(Equal(tt.kcp.Spec.Version)) + g.Expect(res.CurrentInfraMachine).ToNot(BeNil()) + g.Expect(res.DesiredInfraMachine).ToNot(BeNil()) + g.Expect(res.CurrentKubeadmConfig).ToNot(BeNil()) + g.Expect(res.DesiredKubeadmConfig).ToNot(BeNil()) if upToDate { - g.Expect(res).To(BeNil()) + g.Expect(res.LogMessages).To(BeEmpty()) + g.Expect(res.ConditionMessages).To(BeEmpty()) } else { g.Expect(res.LogMessages).To(BeComparableTo(tt.expectLogMessages)) g.Expect(res.ConditionMessages).To(Equal(tt.expectConditionMessages)) diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go 
index 80d497b90c9c..7eece0c70c0b 100644 --- a/controlplane/kubeadm/internal/workload_cluster.go +++ b/controlplane/kubeadm/internal/workload_cluster.go @@ -45,13 +45,13 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/proxy" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" containerutil "sigs.k8s.io/cluster-api/util/container" "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/version" ) const ( @@ -63,13 +63,6 @@ const ( ) var ( - // minKubernetesVersionControlPlaneKubeletLocalMode is the min version from which - // we will enable the ControlPlaneKubeletLocalMode kubeadm feature gate. - // Note: We have to do this with Kubernetes 1.31. Because with that version we encountered - // a case where it's not okay anymore to ignore the Kubernetes version skew (kubelet 1.31 uses - // the spec.clusterIP field selector that is only implemented in kube-apiserver >= 1.31.0). - minKubernetesVersionControlPlaneKubeletLocalMode = semver.MustParse("1.31.0") - // ErrControlPlaneMinNodes signals that a cluster doesn't meet the minimum required nodes // to remove an etcd member. ErrControlPlaneMinNodes = errors.New("cluster has fewer than 2 control plane nodes; removing an etcd member is not supported") @@ -166,7 +159,7 @@ func (w *Workload) UpdateFeatureGatesInKubeadmConfigMap(kubeadmConfigSpec bootst return func(c *bootstrapv1.ClusterConfiguration) { // We use DeepCopy here to avoid modifying the KCP object in the apiserver. 
kubeadmConfigSpec := kubeadmConfigSpec.DeepCopy() - DefaultFeatureGates(kubeadmConfigSpec, kubernetesVersion) + desiredstate.DefaultFeatureGates(kubeadmConfigSpec, kubernetesVersion) // Even if featureGates is nil, reset it to ClusterConfiguration // to override any previously set feature gates. @@ -174,27 +167,6 @@ func (w *Workload) UpdateFeatureGatesInKubeadmConfigMap(kubeadmConfigSpec bootst } } -const ( - // ControlPlaneKubeletLocalMode is a feature gate of kubeadm that ensures - // kubelets only communicate with the local apiserver. - ControlPlaneKubeletLocalMode = "ControlPlaneKubeletLocalMode" -) - -// DefaultFeatureGates defaults the feature gates field. -func DefaultFeatureGates(kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec, kubernetesVersion semver.Version) { - if version.Compare(kubernetesVersion, minKubernetesVersionControlPlaneKubeletLocalMode, version.WithoutPreReleases()) < 0 { - return - } - - if kubeadmConfigSpec.ClusterConfiguration.FeatureGates == nil { - kubeadmConfigSpec.ClusterConfiguration.FeatureGates = map[string]bool{} - } - - if _, ok := kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode]; !ok { - kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode] = true - } -} - // UpdateAPIServerInKubeadmConfigMap updates api server configuration in kubeadm config map. 
func (w *Workload) UpdateAPIServerInKubeadmConfigMap(apiServer bootstrapv1.APIServer) func(*bootstrapv1.ClusterConfiguration) { return func(c *bootstrapv1.ClusterConfiguration) { diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go index 5390ac42faf6..4d4bfdae50f2 100644 --- a/controlplane/kubeadm/internal/workload_cluster_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_test.go @@ -35,6 +35,7 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) @@ -947,7 +948,7 @@ func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { }, wantClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, + desiredstate.ControlPlaneKubeletLocalMode: true, }, }, }, @@ -964,8 +965,8 @@ func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { }, wantClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - "EtcdLearnerMode": true, + desiredstate.ControlPlaneKubeletLocalMode: true, + "EtcdLearnerMode": true, }, }, }, @@ -977,14 +978,14 @@ func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { kubernetesVersion: semver.MustParse("1.31.0"), newClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - ControlPlaneKubeletLocalMode: false, + "EtcdLearnerMode": true, + desiredstate.ControlPlaneKubeletLocalMode: false, }, }, wantClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: false, - "EtcdLearnerMode": true, + desiredstate.ControlPlaneKubeletLocalMode: false, + "EtcdLearnerMode": true, }, }, }, @@ -1085,126 +1086,6 @@ 
func TestUpdateCertificateValidityPeriodDaysInKubeadmConfigMap(t *testing.T) { } } -func TestDefaultFeatureGates(t *testing.T) { - tests := []struct { - name string - kubernetesVersion semver.Version - kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec - wantKubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec - }{ - { - name: "don't default ControlPlaneKubeletLocalMode for 1.30", - kubernetesVersion: semver.MustParse("1.30.99"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - }, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: nil, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{}, - }, - }, - wantKubeadmConfigSpec: 
&bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - }, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - "EtcdLearnerMode": true, - }, - }, - }, - }, - { - name: "don't default ControlPlaneKubeletLocalMode for 1.31 if already set to false", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: false, - }, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: false, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - DefaultFeatureGates(tt.kubeadmConfigSpec, tt.kubernetesVersion) - g.Expect(tt.wantKubeadmConfigSpec).Should(BeComparableTo(tt.kubeadmConfigSpec)) - }) - } -} - func getProxyImageInfo(ctx context.Context, c client.Client) (string, error) { ds := &appsv1.DaemonSet{} diff --git a/internal/util/ssa/patch.go b/internal/util/ssa/patch.go index 9b22a943a130..4ff6dfa84d80 100644 --- a/internal/util/ssa/patch.go +++ b/internal/util/ssa/patch.go @@ -103,8 +103,11 @@ func Patch(ctx context.Context, c client.Client, fieldManager string, modified c client.ForceOwnership, client.FieldOwner(fieldManager), } + // Note: Intentionally not including the name 
of the object in the error message + // as during create the name might be random generated in every reconcile. + // If these errors are written to conditions this would lead to an infinite reconcile. if err := c.Apply(ctx, client.ApplyConfigurationFromUnstructured(modifiedUnstructured), applyOptions...); err != nil { - return errors.Wrapf(err, "failed to apply %s %s", gvk.Kind, klog.KObj(modifiedUnstructured)) + return errors.Wrapf(err, "failed to apply %s", gvk.Kind) } // Write back the modified object so callers can access the patched object.