
Commit 12d8855

Set Kubernetes version in machinepool machine Status.version
1 parent 0cdfb88 commit 12d8855

6 files changed: +411 -87 lines changed

exp/internal/controllers/machinepool_controller.go

Lines changed: 25 additions & 4 deletions
@@ -80,6 +80,21 @@ type MachinePoolReconciler struct {
 	externalTracker external.ObjectTracker
 }
 
+// scope holds the different objects that are read and used during the reconcile.
+type scope struct {
+	// cluster is the Cluster object the MachinePool belongs to.
+	// It is set at the beginning of the reconcile function.
+	cluster *clusterv1.Cluster
+
+	// machinePool is the MachinePool object. It is set at the beginning
+	// of the reconcile function.
+	machinePool *expv1.MachinePool
+
+	// nodeRefMap is a map of providerIDs to Nodes that are associated with the Cluster.
+	// It is set after reconcileInfrastructure is called.
+	nodeRefMap map[string]*corev1.Node
+}
+
 func (r *MachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
 	clusterToMachinePools, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &expv1.MachinePoolList{}, mgr.GetScheme())
 	if err != nil {
@@ -210,7 +225,11 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 
 	// Handle normal reconciliation loop.
-	res, err := r.reconcile(ctx, cluster, mp)
+	scope := &scope{
+		cluster:     cluster,
+		machinePool: mp,
+	}
+	res, err := r.reconcile(ctx, scope)
 	// Requeue if the reconcile failed because the ClusterCacheTracker was locked for
 	// the current cluster because of concurrent access.
 	if errors.Is(err, remote.ErrClusterLocked) {
@@ -220,16 +239,18 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	return res, err
 }
 
-func (r *MachinePoolReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
+func (r *MachinePoolReconciler) reconcile(ctx context.Context, s *scope) (ctrl.Result, error) {
 	// Ensure the MachinePool is owned by the Cluster it belongs to.
+	cluster := s.cluster
+	mp := s.machinePool
 	mp.SetOwnerReferences(util.EnsureOwnerRef(mp.GetOwnerReferences(), metav1.OwnerReference{
 		APIVersion: clusterv1.GroupVersion.String(),
 		Kind:       "Cluster",
 		Name:       cluster.Name,
 		UID:        cluster.UID,
 	}))
 
-	phases := []func(context.Context, *clusterv1.Cluster, *expv1.MachinePool) (ctrl.Result, error){
+	phases := []func(context.Context, *scope) (ctrl.Result, error){
 		r.reconcileBootstrap,
 		r.reconcileInfrastructure,
 		r.reconcileNodeRefs,
@@ -239,7 +260,7 @@ func (r *MachinePoolReconciler) reconcile(ctx context.Context, cluster *clusterv
 	errs := []error{}
 	for _, phase := range phases {
 		// Call the inner reconciliation methods.
-		phaseResult, err := phase(ctx, cluster, mp)
+		phaseResult, err := phase(ctx, s)
 		if err != nil {
 			errs = append(errs, err)
 		}

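The controller change above is a scope-threading refactor: every reconcile phase now takes a single *scope, so adding a new shared value (here nodeRefMap) extends one struct instead of every phase signature. A minimal, runnable sketch of the same pattern, using stand-in string and map types rather than the controller's real Cluster and MachinePool types:

package main

import (
	"context"
	"fmt"
)

// scope mimics the shape introduced by the commit: shared inputs plus a
// field that one phase fills in and later phases read.
type scope struct {
	cluster     string            // stands in for *clusterv1.Cluster
	machinePool string            // stands in for *expv1.MachinePool
	nodeRefMap  map[string]string // providerID -> node name, set by one phase
}

func reconcileInfrastructure(_ context.Context, s *scope) error {
	// This phase populates the map; later phases depend on it.
	s.nodeRefMap = map[string]string{"aws://us-east-1/id-node-1": "node-1"}
	return nil
}

func reconcileNodeRefs(_ context.Context, s *scope) error {
	if s.nodeRefMap == nil {
		return fmt.Errorf("nodeRefMap not set; reconcileInfrastructure must run first")
	}
	fmt.Println("nodes for", s.machinePool, ":", s.nodeRefMap)
	return nil
}

func main() {
	s := &scope{cluster: "test-cluster", machinePool: "test-pool"}
	phases := []func(context.Context, *scope) error{
		reconcileInfrastructure,
		reconcileNodeRefs,
	}
	for _, phase := range phases {
		if err := phase(context.Background(), s); err != nil {
			fmt.Println("error:", err)
		}
	}
}

The nil check added to reconcileNodeRefs below mirrors exactly this ordering dependency: reconcileInfrastructure must populate the map before reconcileNodeRefs reads it.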
exp/internal/controllers/machinepool_controller_noderef.go

Lines changed: 12 additions & 25 deletions
@@ -46,8 +46,10 @@ type getNodeReferencesResult struct {
 	ready int
 }
 
-func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
+func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, s *scope) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
+	cluster := s.cluster
+	mp := s.machinePool
 
 	// Create a watch on the nodes in the Cluster.
 	if err := r.watchClusterNodes(ctx, cluster); err != nil {
@@ -81,8 +83,13 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *
 		return ctrl.Result{}, err
 	}
 
+	// Return early if nodeRefMap is nil.
+	if s.nodeRefMap == nil {
+		return ctrl.Result{}, errors.New("failed to get node references")
+	}
+
 	// Get the Node references.
-	nodeRefsResult, err := r.getNodeReferences(ctx, clusterClient, mp.Spec.ProviderIDList, mp.Spec.MinReadySeconds)
+	nodeRefsResult, err := r.getNodeReferences(ctx, mp.Spec.ProviderIDList, mp.Spec.MinReadySeconds, s.nodeRefMap)
 	if err != nil {
 		if err == errNoAvailableNodes {
 			log.Info("Cannot assign NodeRefs to MachinePool, no matching Nodes")
@@ -153,30 +160,10 @@ func (r *MachinePoolReconciler) deleteRetiredNodes(ctx context.Context, c client
 	return nil
 }
 
-func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client.Client, providerIDList []string, minReadySeconds *int32) (getNodeReferencesResult, error) {
+func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, providerIDList []string, minReadySeconds *int32, nodeRefsMap map[string]*corev1.Node) (getNodeReferencesResult, error) {
 	log := ctrl.LoggerFrom(ctx, "providerIDList", len(providerIDList))
 
 	var ready, available int
-	nodeRefsMap := make(map[string]corev1.Node)
-	nodeList := corev1.NodeList{}
-	for {
-		if err := c.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil {
-			return getNodeReferencesResult{}, errors.Wrapf(err, "failed to List nodes")
-		}
-
-		for _, node := range nodeList.Items {
-			if node.Spec.ProviderID == "" {
-				log.V(2).Info("No ProviderID detected, skipping", "providerID", node.Spec.ProviderID)
-				continue
-			}
-
-			nodeRefsMap[node.Spec.ProviderID] = node
-		}
-
-		if nodeList.Continue == "" {
-			break
-		}
-	}
 
 	var nodeRefs []corev1.ObjectReference
 	for _, providerID := range providerIDList {
@@ -185,9 +172,9 @@ func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client.
 			continue
 		}
 		if node, ok := nodeRefsMap[providerID]; ok {
-			if noderefutil.IsNodeReady(&node) {
+			if noderefutil.IsNodeReady(node) {
 				ready++
-				if noderefutil.IsNodeAvailable(&node, *minReadySeconds, metav1.Now()) {
+				if noderefutil.IsNodeAvailable(node, *minReadySeconds, metav1.Now()) {
 					available++
 				}
 			}

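With the prebuilt map passed in, getNodeReferences resolves each entry of Spec.ProviderIDList by map lookup instead of re-listing nodes on every call, and the pointer values let noderefutil.IsNodeReady and IsNodeAvailable take the node directly. A small self-contained sketch of that lookup path; isNodeReady here is an illustrative stand-in that mirrors the NodeReady-condition check, not the imported helper:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isNodeReady reports whether the Node's NodeReady condition is True,
// which is what noderefutil.IsNodeReady is understood to check.
func isNodeReady(node *corev1.Node) bool {
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	nodeRefsMap := map[string]*corev1.Node{
		"aws://us-east-1/id-node-1": {Status: corev1.NodeStatus{Conditions: []corev1.NodeCondition{
			{Type: corev1.NodeReady, Status: corev1.ConditionTrue},
		}}},
		"aws://us-west-2/id-node-2": {}, // no conditions reported yet
	}

	ready := 0
	for _, providerID := range []string{
		"aws://us-east-1/id-node-1",
		"aws://us-west-2/id-node-2",
		"aws://us-west-2/id-node-3", // not in the map: skipped
	} {
		if node, ok := nodeRefsMap[providerID]; ok && isNodeReady(node) {
			ready++
		}
	}
	fmt.Println("ready:", ready) // ready: 1
}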
exp/internal/controllers/machinepool_controller_noderef_test.go

Lines changed: 9 additions & 12 deletions
@@ -36,9 +36,8 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 		Client:   fake.NewClientBuilder().Build(),
 		recorder: record.NewFakeRecorder(32),
 	}
-
-	nodeList := []client.Object{
-		&corev1.Node{
+	nodeRefsMap := map[string]*corev1.Node{
+		"aws://us-east-1/id-node-1": {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "node-1",
 			},
@@ -54,7 +53,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 				},
 			},
 		},
-		&corev1.Node{
+		"aws://us-west-2/id-node-2": {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "node-2",
 			},
@@ -70,15 +69,15 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 				},
 			},
 		},
-		&corev1.Node{
+		"aws://us-west-2/id-node-3": {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "node-3",
 			},
 			Spec: corev1.NodeSpec{
 				ProviderID: "aws://us-west-2/id-node-3",
 			},
 		},
-		&corev1.Node{
+		"gce://us-central1/gce-id-node-2": {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "gce-node-2",
 			},
@@ -94,7 +93,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 				},
 			},
 		},
-		&corev1.Node{
+		"azure://westus2/id-node-4": {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "azure-node-4",
 			},
@@ -110,7 +109,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 				},
 			},
 		},
-		&corev1.Node{
+		"azure://westus2/id-nodepool1/0": {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "azure-nodepool1-0",
 			},
@@ -126,7 +125,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 				},
 			},
 		},
-		&corev1.Node{
+		"azure://westus2/id-nodepool2/0": {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "azure-nodepool2-0",
 			},
@@ -144,8 +143,6 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 		},
 	}
 
-	client := fake.NewClientBuilder().WithObjects(nodeList...).Build()
-
 	testCases := []struct {
 		name           string
 		providerIDList []string
@@ -284,7 +281,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			g := NewWithT(t)
 
-			result, err := r.getNodeReferences(ctx, client, test.providerIDList, ptr.To(test.minReadySeconds))
+			result, err := r.getNodeReferences(ctx, test.providerIDList, ptr.To(test.minReadySeconds), nodeRefsMap)
 			if test.err == nil {
 				g.Expect(err).ToNot(HaveOccurred())
 			} else {

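The test's minReadySeconds cases exercise node availability. For orientation, a hedged sketch of the semantics noderefutil.IsNodeAvailable is understood to apply here (a node counts as available once NodeReady has been True for at least minReadySeconds); isNodeAvailable below is illustrative, not the real helper:

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isNodeAvailable: ready, and ready for at least minReadySeconds.
func isNodeAvailable(node *corev1.Node, minReadySeconds int32, now metav1.Time) bool {
	for _, c := range node.Status.Conditions {
		if c.Type != corev1.NodeReady || c.Status != corev1.ConditionTrue {
			continue
		}
		if minReadySeconds == 0 {
			return true
		}
		// The ready condition must have held for the whole window.
		minReadyFor := time.Duration(minReadySeconds) * time.Second
		return !c.LastTransitionTime.IsZero() &&
			c.LastTransitionTime.Add(minReadyFor).Before(now.Time)
	}
	return false
}

func main() {
	node := &corev1.Node{Status: corev1.NodeStatus{Conditions: []corev1.NodeCondition{{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionTrue,
		LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Second)),
	}}}}

	fmt.Println(isNodeAvailable(node, 0, metav1.Now()))  // true: no minimum window
	fmt.Println(isNodeAvailable(node, 5, metav1.Now()))  // true: ready for 10s already
	fmt.Println(isNodeAvailable(node, 60, metav1.Now())) // false: not ready for 60s yet
}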
exp/internal/controllers/machinepool_controller_phases.go

Lines changed: 69 additions & 12 deletions
@@ -177,9 +177,10 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster *
 }
 
 // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool.
-func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) {
+func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, s *scope) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
-
+	cluster := s.cluster
+	m := s.machinePool
 	// Call generic external reconciler if we have an external reference.
 	var bootstrapConfig *unstructured.Unstructured
 	if m.Spec.Template.Spec.Bootstrap.ConfigRef != nil {
@@ -241,9 +242,10 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster
 }
 
 // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool.
-func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
+func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
-
+	cluster := s.cluster
+	mp := s.machinePool
 	// Call generic external reconciler.
 	infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef)
 	if err != nil {
@@ -283,8 +285,19 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu
 		conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
 	)
 
-	if err := r.reconcileMachines(ctx, mp, infraConfig); err != nil {
-		return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile Machines for MachinePool %s", klog.KObj(mp))
+	clusterClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	var getNodeRefsErr error
+	// Get the nodeRefsMap from the cluster.
+	s.nodeRefMap, getNodeRefsErr = r.getNodeRefMap(ctx, clusterClient)
+
+	err = r.reconcileMachines(ctx, s, infraConfig)
+
+	if err != nil || getNodeRefsErr != nil {
+		return ctrl.Result{}, kerrors.NewAggregate([]error{errors.Wrapf(err, "failed to reconcile Machines for MachinePool %s", klog.KObj(mp)), errors.Wrapf(getNodeRefsErr, "failed to get nodeRefs for MachinePool %s", klog.KObj(mp))})
 	}
 
 	if !mp.Status.InfrastructureReady {
@@ -328,8 +341,9 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu
 // infrastructure is created accordingly.
 // Note: When supported by the cloud provider implementation of the MachinePool, machines will provide a means to interact
 // with the corresponding infrastructure (e.g. delete a specific machine in case MachineHealthCheck detects it is unhealthy).
-func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, mp *expv1.MachinePool, infraMachinePool *unstructured.Unstructured) error {
+func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, s *scope, infraMachinePool *unstructured.Unstructured) error {
 	log := ctrl.LoggerFrom(ctx)
+	mp := s.machinePool
 
 	var infraMachineKind string
 	if err := util.UnstructuredUnmarshalField(infraMachinePool, &infraMachineKind, "status", "infrastructureMachineKind"); err != nil {
@@ -376,15 +390,15 @@ func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, mp *expv1
 		return err
 	}
 
-	if err := r.createOrUpdateMachines(ctx, mp, machineList.Items, infraMachineList.Items); err != nil {
+	if err := r.createOrUpdateMachines(ctx, s, machineList.Items, infraMachineList.Items); err != nil {
 		return errors.Wrapf(err, "failed to create machines for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
 	}
 
 	return nil
 }
 
 // createOrUpdateMachines creates a MachinePool Machine for each infraMachine if it doesn't already exist and sets the owner reference and infraRef.
-func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp *expv1.MachinePool, machines []clusterv1.Machine, infraMachines []unstructured.Unstructured) error {
+func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, s *scope, machines []clusterv1.Machine, infraMachines []unstructured.Unstructured) error {
 	log := ctrl.LoggerFrom(ctx)
 
 	// Construct a set of names of infraMachines that already have a Machine.
@@ -398,19 +412,30 @@ func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp *
 	var errs []error
 	for i := range infraMachines {
 		infraMachine := &infraMachines[i]
+
+		// Get Spec.ProviderID from the infraMachine.
+		var providerID string
+		var node *corev1.Node
+		if err := util.UnstructuredUnmarshalField(infraMachine, &providerID, "spec", "providerID"); err != nil {
+			log.V(4).Info("could not retrieve providerID for infraMachine", "infraMachine", klog.KObj(infraMachine))
+		} else {
+			// Retrieve the Node for the infraMachine from the nodeRefsMap using the providerID.
+			node = s.nodeRefMap[providerID]
+		}
+
 		// If infraMachine already has a Machine, update it if needed.
 		if existingMachine, ok := infraMachineToMachine[infraMachine.GetName()]; ok {
 			log.V(2).Info("Patching existing Machine for infraMachine", infraMachine.GetKind(), klog.KObj(infraMachine), "Machine", klog.KObj(&existingMachine))
 
-			desiredMachine := computeDesiredMachine(mp, infraMachine, &existingMachine)
+			desiredMachine := r.computeDesiredMachine(s.machinePool, infraMachine, &existingMachine, node)
 			if err := ssa.Patch(ctx, r.Client, MachinePoolControllerName, desiredMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: &existingMachine}); err != nil {
 				log.Error(err, "failed to update Machine", "Machine", klog.KObj(desiredMachine))
 				errs = append(errs, errors.Wrapf(err, "failed to update Machine %q", klog.KObj(desiredMachine)))
 			}
 		} else {
 			// Otherwise create a new Machine for the infraMachine.
 			log.Info("Creating new Machine for infraMachine", "infraMachine", klog.KObj(infraMachine))
-			machine := computeDesiredMachine(mp, infraMachine, nil)
+			machine := r.computeDesiredMachine(s.machinePool, infraMachine, nil, node)
 
 			if err := ssa.Patch(ctx, r.Client, MachinePoolControllerName, machine); err != nil {
 				errs = append(errs, errors.Wrapf(err, "failed to create new Machine for infraMachine %q in namespace %q", infraMachine.GetName(), infraMachine.GetNamespace()))
@@ -432,14 +457,19 @@ func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp *
 
 // computeDesiredMachine constructs the desired Machine for an infraMachine.
 // If the Machine exists, it ensures the Machine is always owned by the MachinePool.
-func computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Unstructured, existingMachine *clusterv1.Machine) *clusterv1.Machine {
+func (r *MachinePoolReconciler) computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Unstructured, existingMachine *clusterv1.Machine, existingNode *corev1.Node) *clusterv1.Machine {
 	infraRef := corev1.ObjectReference{
 		APIVersion: infraMachine.GetAPIVersion(),
 		Kind:       infraMachine.GetKind(),
 		Name:       infraMachine.GetName(),
 		Namespace:  infraMachine.GetNamespace(),
 	}
 
+	var kubernetesVersion *string
+	if existingNode != nil && existingNode.Status.NodeInfo.KubeletVersion != "" {
+		kubernetesVersion = &existingNode.Status.NodeInfo.KubeletVersion
+	}
+
 	machine := &clusterv1.Machine{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: infraMachine.GetName(),
@@ -452,6 +482,7 @@ func computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Uns
 		Spec: clusterv1.MachineSpec{
 			ClusterName:       mp.Spec.ClusterName,
 			InfrastructureRef: infraRef,
+			Version:           kubernetesVersion,
 		},
 	}
 

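The computeDesiredMachine change above carries the commit's payload: when a Node matching the infraMachine's providerID exists, its reported kubelet version is copied into the desired Machine's Spec.Version. A self-contained sketch of that derivation; versionFromNode is an illustrative helper, not part of the diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// versionFromNode returns the Node's reported kubelet version, or nil when
// there is no Node (or no reported version) yet, leaving Version unset.
func versionFromNode(node *corev1.Node) *string {
	if node != nil && node.Status.NodeInfo.KubeletVersion != "" {
		v := node.Status.NodeInfo.KubeletVersion
		return &v
	}
	return nil
}

func main() {
	node := &corev1.Node{}
	node.Status.NodeInfo.KubeletVersion = "v1.28.2"

	if v := versionFromNode(node); v != nil {
		fmt.Println("machine version:", *v) // machine version: v1.28.2
	}
	if versionFromNode(nil) == nil {
		fmt.Println("no node yet: version stays unset")
	}
}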
@@ -537,3 +568,30 @@ func (r *MachinePoolReconciler) waitForMachineCreation(ctx context.Context, mach
 
 	return nil
 }
+
+func (r *MachinePoolReconciler) getNodeRefMap(ctx context.Context, c client.Client) (map[string]*corev1.Node, error) {
+	log := ctrl.LoggerFrom(ctx)
+	nodeRefsMap := make(map[string]*corev1.Node)
+	nodeList := corev1.NodeList{}
+	for {
+		if err := c.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil {
+			return nil, err
+		}
+
+		for _, node := range nodeList.Items {
+			if node.Spec.ProviderID == "" {
+				log.V(2).Info("No ProviderID detected, skipping", "providerID", node.Spec.ProviderID)
+				continue
+			}
+
+			node := node // copy, so the stored pointer does not alias the loop variable
+			nodeRefsMap[node.Spec.ProviderID] = &node
+		}
+
+		if nodeList.Continue == "" {
+			break
+		}
+	}
+
+	return nodeRefsMap, nil
+}

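getNodeRefMap uses the standard paginated-List idiom: call List repeatedly, passing the previous response's Continue token, until the token comes back empty, indexing each node by its Spec.ProviderID along the way. A runnable sketch against a controller-runtime fake client; buildNodeRefMap is an illustrative stand-in for the method above, and the per-iteration copy matters because taking &node on the range variable would alias a single variable in Go versions before 1.22:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func buildNodeRefMap(ctx context.Context, c client.Client) (map[string]*corev1.Node, error) {
	nodeRefsMap := make(map[string]*corev1.Node)
	nodeList := corev1.NodeList{}
	for {
		// Continue is empty on the first call, so this starts at page one.
		if err := c.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil {
			return nil, err
		}
		for _, node := range nodeList.Items {
			if node.Spec.ProviderID == "" {
				continue // nodes without a providerID cannot be matched
			}
			node := node // copy before taking the address (pre-Go 1.22 aliasing)
			nodeRefsMap[node.Spec.ProviderID] = &node
		}
		if nodeList.Continue == "" {
			break // no more pages
		}
	}
	return nodeRefsMap, nil
}

func main() {
	node := &corev1.Node{}
	node.Name = "node-1"
	node.Spec.ProviderID = "aws://us-east-1/id-node-1"

	c := fake.NewClientBuilder().WithObjects(node).Build()
	m, err := buildNodeRefMap(context.Background(), c)
	fmt.Println(len(m), err) // 1 <nil>
}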