@@ -41,6 +41,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/feature"
 	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
 	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware"
 	infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
@@ -163,6 +164,15 @@ func (v *VmopMachineService) SyncFailureReason(_ context.Context, machineCtx cap
 	return supervisorMachineCtx.VSphereMachine.Status.FailureReason != nil || supervisorMachineCtx.VSphereMachine.Status.FailureMessage != nil, nil
 }
 
+// affinityInfo carries the placement inputs computed for a worker VM: the
+// affinity spec, the owning VirtualMachineGroup name, and an optional
+// failure domain.
+type affinityInfo struct {
+	vmAffinitySpec *vmoprv1.VirtualMachineAffinitySpec
+	vmGroupName    string
+	failureDomain  *string
+
+	// TODO: is this needed for the single zone case?
+	// zones []topologyv1.Zone
+}
+
 // ReconcileNormal reconciles create and update events for VM Operator VMs.
 func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) {
 	log := ctrl.LoggerFrom(ctx)
@@ -171,10 +181,6 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 		return false, errors.New("received unexpected SupervisorMachineContext type")
 	}
 
-	if supervisorMachineCtx.Machine.Spec.FailureDomain != "" {
-		supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain)
-	}
-
 	// If debug logging is enabled, report the number of vms in the cluster before and after the reconcile
 	if log.V(5).Enabled() {
 		vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx)
@@ -188,6 +194,101 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 	// Set the VM state. Will get reset throughout the reconcile
 	supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending
 
+	var affInfo affinityInfo
+	if feature.Gates.Enabled(feature.NodeAutoPlacement) &&
+		!infrautilv1.IsControlPlaneMachine(machineCtx.GetVSphereMachine()) {
+		// Check for the presence of a VirtualMachineGroup with the same name
+		// and namespace as the Cluster.
+		vmOperatorVMGroup := &vmoprv1.VirtualMachineGroup{}
+		key := client.ObjectKey{
+			Namespace: supervisorMachineCtx.Cluster.Namespace,
+			Name:      supervisorMachineCtx.Cluster.Name,
+		}
+		if err := v.Client.Get(ctx, key, vmOperatorVMGroup); err != nil {
+			if !apierrors.IsNotFound(err) {
+				return false, err
+			}
+			log.V(4).Info("VirtualMachineGroup not found, requeueing")
+			return true, nil
+		}
+
+		// Check if the current machine is a member of the boot order
+		// in the VirtualMachineGroup.
+		if !v.checkVirtualMachineGroupMembership(vmOperatorVMGroup, supervisorMachineCtx) {
+			log.V(4).Info("Waiting for VirtualMachineGroup membership, requeueing")
+			return true, nil
+		}
+
+		// Initialize the affinityInfo for the VM.
+		affInfo = affinityInfo{
+			vmGroupName: vmOperatorVMGroup.Name,
+		}
+
+		// Check for the presence of the per-node-pool zone label on the
+		// VirtualMachineGroup object.
+		nodePool := supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel]
+		if zone, ok := vmOperatorVMGroup.Labels[fmt.Sprintf("zone.cluster.x-k8s.io/%s", nodePool)]; ok && zone != "" {
+			affInfo.failureDomain = ptr.To(zone)
+		}
+
+		affInfo.vmAffinitySpec = &vmoprv1.VirtualMachineAffinitySpec{
+			// Required: co-locate VMs of this node pool within a single zone.
+			VMAffinity: &vmoprv1.VirtualMachineAffinityVMAffinitySpec{
+				RequiredDuringSchedulingIgnoredDuringExecution: []vmoprv1.VMAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								clusterv1.MachineDeploymentNameLabel: nodePool,
+							},
+						},
+						TopologyKey: kubeTopologyZoneLabelKey,
+					},
+				},
+			},
+			VMAntiAffinity: &vmoprv1.VirtualMachineAntiAffinityVMAffinitySpec{
+				// Preferred: spread worker VMs of this node pool across hosts.
+				PreferredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								clusterv1.MachineDeploymentNameLabel: nodePool,
+							},
+							MatchExpressions: []metav1.LabelSelectorRequirement{
+								{
+									Key:      clusterv1.MachineControlPlaneLabel,
+									Operator: metav1.LabelSelectorOpDoesNotExist,
+								},
+							},
+						},
+						TopologyKey: kubeHostNameLabelKey,
+					},
+				},
+				// Preferred: keep this node pool zonally separate from other
+				// worker node pools.
+				PreferredDuringSchedulingIgnoredDuringExecution: []vmoprv1.VMAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchExpressions: []metav1.LabelSelectorRequirement{
+								{
+									Key:      clusterv1.MachineDeploymentNameLabel,
+									Operator: metav1.LabelSelectorOpNotIn,
+									Values:   []string{nodePool},
+								},
+								{
+									Key:      clusterv1.MachineControlPlaneLabel,
+									Operator: metav1.LabelSelectorOpDoesNotExist,
+								},
+							},
+						},
+						TopologyKey: kubeTopologyZoneLabelKey,
+					},
+				},
+			},
+		}
+	}
+
+	if supervisorMachineCtx.Machine.Spec.FailureDomain != "" {
+		supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain)
+	}
+
 	// Check for the presence of an existing object
 	vmOperatorVM := &vmoprv1.VirtualMachine{}
 	key, err := virtualMachineObjectKey(supervisorMachineCtx.Machine.Name, supervisorMachineCtx.Machine.Namespace, supervisorMachineCtx.VSphereMachine.Spec.NamingStrategy)
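
Reviewer note: the per-node-pool zone pinning above hinges on a label of the form `zone.cluster.x-k8s.io/<machine-deployment-name>` on the cluster's VirtualMachineGroup. A minimal, self-contained sketch of that lookup follows; the pool name `workers-0` and zone value `zone-a` are hypothetical, and plain maps stand in for the real API objects.

package main

import "fmt"

// zoneLabelKeyFor mirrors the fmt.Sprintf("zone.cluster.x-k8s.io/%s", nodePool)
// lookup used in ReconcileNormal above.
func zoneLabelKeyFor(nodePool string) string {
	return fmt.Sprintf("zone.cluster.x-k8s.io/%s", nodePool)
}

func main() {
	// Hypothetical labels as they might appear on a cluster's VirtualMachineGroup.
	vmGroupLabels := map[string]string{
		"zone.cluster.x-k8s.io/workers-0": "zone-a",
	}
	if zone, ok := vmGroupLabels[zoneLabelKeyFor("workers-0")]; ok && zone != "" {
		fmt.Println("pinning workers-0 machines to", zone) // pinning workers-0 machines to zone-a
	}
}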
@@ -208,7 +309,7 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 	}
 
 	// Reconcile the VM Operator VirtualMachine.
-	if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM); err != nil {
+	if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM, &affInfo); err != nil {
 		v1beta1conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1beta1.ConditionSeverityWarning,
 			"failed to create or update VirtualMachine: %v", err)
 		v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{
@@ -378,7 +479,7 @@ func (v *VmopMachineService) GetHostInfo(ctx context.Context, machineCtx capvcon
 	return vmOperatorVM.Status.Host, nil
 }
 
-func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error {
+func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine, affinityInfo *affinityInfo) error {
 	// All Machine resources should define the version of Kubernetes to use.
 	if supervisorMachineCtx.Machine.Spec.Version == "" {
 		return errors.Errorf(
@@ -472,7 +573,7 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis
 	}
 
 	// Assign the VM's labels.
-	vmOperatorVM.Labels = getVMLabels(supervisorMachineCtx, vmOperatorVM.Labels)
+	vmOperatorVM.Labels = getVMLabels(supervisorMachineCtx, vmOperatorVM.Labels, affinityInfo)
 
 	addResourcePolicyAnnotations(supervisorMachineCtx, vmOperatorVM)
@@ -494,6 +595,15 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis
 		vmOperatorVM = typedModified
 	}
 
+	// Set the affinity spec and group name only when they are still unset,
+	// so an existing VM's placement is left untouched on later reconciles.
+	if affinityInfo != nil && affinityInfo.vmAffinitySpec != nil {
+		if vmOperatorVM.Spec.Affinity == nil {
+			vmOperatorVM.Spec.Affinity = affinityInfo.vmAffinitySpec
+		}
+		if vmOperatorVM.Spec.GroupName == "" {
+			vmOperatorVM.Spec.GroupName = affinityInfo.vmGroupName
+		}
+	}
+
 	// Make sure the VSphereMachine owns the VM Operator VirtualMachine.
 	if err := ctrlutil.SetControllerReference(supervisorMachineCtx.VSphereMachine, vmOperatorVM, v.Client.Scheme()); err != nil {
 		return errors.Wrapf(err, "failed to mark %s %s/%s as owner of %s %s/%s",
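
Reviewer note: the guard above makes affinity assignment effectively one-shot. A toy sketch of that set-once behavior, using stand-in types rather than the real vmoprv1 API:

package main

import "fmt"

type vmSpec struct {
	Affinity  *string // stand-in for *vmoprv1.VirtualMachineAffinitySpec
	GroupName string
}

// applyPlacement copies the computed placement onto the spec only when the
// corresponding field is still empty, so reconciles never move a placed VM.
func applyPlacement(spec *vmSpec, affinity *string, group string) {
	if affinity == nil {
		return
	}
	if spec.Affinity == nil {
		spec.Affinity = affinity
	}
	if spec.GroupName == "" {
		spec.GroupName = group
	}
}

func main() {
	policy := "node-pool affinity"
	spec := vmSpec{}
	applyPlacement(&spec, &policy, "cluster-1")
	other := "different policy"
	applyPlacement(&spec, &other, "cluster-2")  // no-op: placement already set
	fmt.Println(*spec.Affinity, spec.GroupName) // node-pool affinity cluster-1
}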
@@ -777,7 +887,7 @@ func (v *VmopMachineService) addVolumes(ctx context.Context, supervisorMachineCt
 }
 
 // getVMLabels returns the labels applied to a VirtualMachine.
-func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels map[string]string) map[string]string {
+func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels map[string]string, affinityInfo *affinityInfo) map[string]string {
 	if vmLabels == nil {
 		vmLabels = map[string]string{}
 	}
@@ -791,7 +901,11 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels
 
 	// Get the labels that determine the VM's placement inside of a stretched
 	// cluster.
-	topologyLabels := getTopologyLabels(supervisorMachineCtx)
+	var failureDomain *string
+	if affinityInfo != nil && affinityInfo.failureDomain != nil {
+		failureDomain = affinityInfo.failureDomain
+	}
+	topologyLabels := getTopologyLabels(supervisorMachineCtx, failureDomain)
 	for k, v := range topologyLabels {
 		vmLabels[k] = v
 	}
@@ -800,6 +914,9 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels
 	// resources associated with the target cluster.
 	vmLabels[clusterv1.ClusterNameLabel] = supervisorMachineCtx.GetClusterContext().Cluster.Name
 
+	// Ensure the VM carries the machine deployment name label, which the
+	// affinity selectors above match on.
+	vmLabels[clusterv1.MachineDeploymentNameLabel] = supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel]
+
 	return vmLabels
 }
 
@@ -809,12 +926,18 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels
 //
 // and thus the code is optimized as such. However, in the future
 // this function may return a more diverse topology.
-func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext) map[string]string {
+func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, failureDomain *string) map[string]string {
+	// TODO: Make it so that we always set the zone label; this might require
+	// querying the zones present when none is set.
 	if fd := supervisorMachineCtx.VSphereMachine.Spec.FailureDomain; fd != nil && *fd != "" {
 		return map[string]string{
 			kubeTopologyZoneLabelKey: *fd,
 		}
 	}
+	if failureDomain != nil && *failureDomain != "" {
+		return map[string]string{
+			kubeTopologyZoneLabelKey: *failureDomain,
+		}
+	}
 	return nil
 }
 
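
Reviewer note: getTopologyLabels now has two sources for the zone, and an explicitly set VSphereMachine failure domain takes precedence over the auto-placement zone from affinityInfo. A standalone sketch of that precedence, assuming kubeTopologyZoneLabelKey is the well-known Kubernetes zone label; the zone names are made up.

package main

import "fmt"

const kubeTopologyZoneLabelKey = "topology.kubernetes.io/zone" // assumed value

// topologyLabels mirrors the precedence above: the spec's failure domain wins,
// the affinity-derived zone is the fallback, and with neither set no
// topology label is emitted.
func topologyLabels(specFailureDomain, affinityFailureDomain *string) map[string]string {
	if fd := specFailureDomain; fd != nil && *fd != "" {
		return map[string]string{kubeTopologyZoneLabelKey: *fd}
	}
	if fd := affinityFailureDomain; fd != nil && *fd != "" {
		return map[string]string{kubeTopologyZoneLabelKey: *fd}
	}
	return nil
}

func main() {
	zoneA, zoneB := "zone-a", "zone-b"
	fmt.Println(topologyLabels(&zoneA, &zoneB)) // map[topology.kubernetes.io/zone:zone-a]
	fmt.Println(topologyLabels(nil, &zoneB))    // map[topology.kubernetes.io/zone:zone-b]
	fmt.Println(topologyLabels(nil, nil))       // map[]
}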
@@ -823,3 +946,16 @@ func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext) ma
 func getMachineDeploymentNameForCluster(cluster *clusterv1.Cluster) string {
 	return fmt.Sprintf("%s-workers-0", cluster.Name)
 }
+
+// checkVirtualMachineGroupMembership reports whether the machine is a member
+// of the first boot order group of the given VirtualMachineGroup.
+func (v *VmopMachineService) checkVirtualMachineGroupMembership(vmOperatorVMGroup *vmoprv1.VirtualMachineGroup, supervisorMachineCtx *vmware.SupervisorMachineContext) bool {
+	if len(vmOperatorVMGroup.Spec.BootOrder) > 0 {
+		for _, member := range vmOperatorVMGroup.Spec.BootOrder[0].Members {
+			if member.Name == supervisorMachineCtx.Machine.Name {
+				return true
+			}
+		}
+	}
+	return false
+}
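
Reviewer note: because only the first boot-order group is consulted, machines listed in later groups do not count as members and will keep requeueing in ReconcileNormal. A self-contained sketch of that rule, with stand-in types for the vmoprv1 group API and hypothetical machine names:

package main

import "fmt"

type member struct{ Name string }
type bootOrderGroup struct{ Members []member }
type vmGroupSpec struct{ BootOrder []bootOrderGroup }

// inFirstBootOrderGroup mirrors checkVirtualMachineGroupMembership: only the
// first boot-order group's members are considered.
func inFirstBootOrderGroup(spec vmGroupSpec, machineName string) bool {
	if len(spec.BootOrder) == 0 {
		return false
	}
	for _, m := range spec.BootOrder[0].Members {
		if m.Name == machineName {
			return true
		}
	}
	return false
}

func main() {
	spec := vmGroupSpec{BootOrder: []bootOrderGroup{
		{Members: []member{{Name: "md-0-vm-1"}, {Name: "md-0-vm-2"}}},
		{Members: []member{{Name: "md-1-vm-1"}}}, // later groups are ignored
	}}
	fmt.Println(inFirstBootOrderGroup(spec, "md-0-vm-1")) // true
	fmt.Println(inFirstBootOrderGroup(spec, "md-1-vm-1")) // false
}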