@@ -41,6 +41,7 @@ import (
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/feature"
 	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
 	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware"
 	infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
@@ -171,10 +172,6 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 		return false, errors.New("received unexpected SupervisorMachineContext type")
 	}
 
-	if supervisorMachineCtx.Machine.Spec.FailureDomain != "" {
-		supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain)
-	}
-
 	// If debug logging is enabled, report the number of vms in the cluster before and after the reconcile
 	if log.V(5).Enabled() {
 		vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx)
@@ -188,6 +185,102 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 	// Set the VM state. Will get reset throughout the reconcile
 	supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending
 
+	// TODO: add check for control plane machine
+	var vmAffinitySpec *vmoprv1.VirtualMachineAffinitySpec
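+	// When node auto placement is enabled and this Machine has no explicit
+	// failure domain in a multi-zone cluster, derive the zone from the cluster's
+	// VirtualMachineGroup and build affinity rules for the VM's placement.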
+	if feature.Gates.Enabled(feature.NodeAutoPlacement) &&
+		supervisorMachineCtx.Machine.Spec.FailureDomain == "" &&
+		len(supervisorMachineCtx.VSphereCluster.Status.FailureDomains) > 1 {
+		// Check for the presence of a VirtualMachineGroup with the same name
+		// and namespace as the Cluster.
+		vmOperatorVMGroup := &vmoprv1.VirtualMachineGroup{}
+		key := client.ObjectKey{
+			Namespace: supervisorMachineCtx.Cluster.Namespace,
+			Name:      supervisorMachineCtx.Cluster.Name,
+		}
+		if err := v.Client.Get(ctx, key, vmOperatorVMGroup); err != nil {
+			if !apierrors.IsNotFound(err) {
+				return false, err
+			}
+			log.V(4).Info("VirtualMachineGroup not found, requeueing")
+			return true, nil
+		}
+
+		// Check for the presence of the node-pool label on the VirtualMachineGroup.
+		nodePool := supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel]
+		if zone, ok := vmOperatorVMGroup.Labels[fmt.Sprintf("capv/%s", nodePool)]; ok && zone != "" {
+			supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(zone)
+		}
+
+		// Fetch the MachineDeployment objects for the Cluster and generate the list of names
+		// to define the anti-affinity for the VM object.
+		mdList := &clusterv1.MachineDeploymentList{}
+		if err := v.Client.List(ctx, mdList,
+			client.InNamespace(supervisorMachineCtx.Cluster.Namespace),
+			client.MatchingLabels{
+				clusterv1.ClusterNameLabel: supervisorMachineCtx.Cluster.Name,
+			}); err != nil {
+			return false, err
+		}
+
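+		// Only node pools that also rely on auto placement (no explicit failure
+		// domain) take part in zone anti-affinity; the machine's own pool is excluded.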
+		antiAffineMDNames := []string{}
+		for _, md := range mdList.Items {
+			if md.Spec.Template.Spec.FailureDomain == "" && md.Name != nodePool {
+				antiAffineMDNames = append(antiAffineMDNames, md.Name)
+			}
+		}
+
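+		// Resulting policy: require that VMs of this node pool share a zone, prefer
+		// spreading them across hosts within that zone, and prefer keeping the
+		// other auto-placed node pools in different zones.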
+		vmAffinitySpec = &vmoprv1.VirtualMachineAffinitySpec{
+			VMAffinity: &vmoprv1.VirtualMachineAffinityVMAffinitySpec{
+				RequiredDuringSchedulingIgnoredDuringExecution: []vmoprv1.VMAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								nodePoolLabelKey: nodePool,
+							},
+						},
+						TopologyKey: kubeTopologyZoneLabelKey,
+					},
+				},
+			},
+			VMAntiAffinity: &vmoprv1.VirtualMachineAntiAffinityVMAffinitySpec{
+				PreferredDuringSchedulingIgnoredDuringExecution: []vmoprv1.VMAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								nodePoolLabelKey: nodePool,
+							},
+						},
+						TopologyKey: kubeHostNameLabelKey,
+					},
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchExpressions: []metav1.LabelSelectorRequirement{
+								{
+									Key:      nodePoolLabelKey,
+									Operator: metav1.LabelSelectorOpIn,
+									Values:   antiAffineMDNames,
+								},
+							},
+						},
+						TopologyKey: kubeTopologyZoneLabelKey,
+					},
+				},
+			},
+		}
+	}
+
+	if supervisorMachineCtx.Machine.Spec.FailureDomain != "" {
+		supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain)
+	}
+
 	// Check for the presence of an existing object
 	vmOperatorVM := &vmoprv1.VirtualMachine{}
 	key, err := virtualMachineObjectKey(supervisorMachineCtx.Machine.Name, supervisorMachineCtx.Machine.Namespace, supervisorMachineCtx.VSphereMachine.Spec.NamingStrategy)
@@ -208,7 +301,7 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 	}
 
 	// Reconcile the VM Operator VirtualMachine.
-	if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM); err != nil {
+	if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM, vmAffinitySpec); err != nil {
 		v1beta1conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1beta1.ConditionSeverityWarning,
 			"failed to create or update VirtualMachine: %v", err)
 		v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{
@@ -378,7 +471,8 @@ func (v *VmopMachineService) GetHostInfo(ctx context.Context, machineCtx capvcon
 	return vmOperatorVM.Status.Host, nil
 }
 
-func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error {
+// reconcileVMOperatorVM also receives the affinity spec computed in ReconcileNormal.
+func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine, vmAffinitySpec *vmoprv1.VirtualMachineAffinitySpec) error {
 	// All Machine resources should define the version of Kubernetes to use.
 	if supervisorMachineCtx.Machine.Spec.Version == "" {
 		return errors.Errorf(
@@ -494,6 +588,17 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis
 		vmOperatorVM = typedModified
 	}
 
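+	// Affinity and group membership are only applied while still unset, on the
+	// assumption that VM Operator treats them as create-only fields.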
+	if vmAffinitySpec != nil {
+		if vmOperatorVM.Spec.Affinity == nil {
+			vmOperatorVM.Spec.Affinity = vmAffinitySpec
+		}
+		if vmOperatorVM.Spec.GroupName == "" {
+			vmOperatorVM.Spec.GroupName = supervisorMachineCtx.GetCluster().Name
+		}
+	}
+
 	// Make sure the VSphereMachine owns the VM Operator VirtualMachine.
 	if err := ctrlutil.SetControllerReference(supervisorMachineCtx.VSphereMachine, vmOperatorVM, v.Client.Scheme()); err != nil {
 		return errors.Wrapf(err, "failed to mark %s %s/%s as owner of %s %s/%s",
@@ -800,6 +905,12 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels
 	// resources associated with the target cluster.
 	vmLabels[clusterv1.ClusterNameLabel] = supervisorMachineCtx.GetClusterContext().Cluster.Name
 
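+	// The node-pool label set below is what the affinity selectors built in
+	// ReconcileNormal match on; nodePoolLabelKey is assumed to be the capv label
+	// key defined alongside this package's other label constants.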
+	// Ensure the VM has the machine deployment name label
+	vmLabels[nodePoolLabelKey] = supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel]
+
 	return vmLabels
 }
 