Skip to content

Commit a2a5231

Browse files
committed
Sort volumes by targetID
1 parent ee39405 commit a2a5231

File tree

7 files changed

+290
-129
lines changed

7 files changed

+290
-129
lines changed

api/v1alpha5/virtualmachine_storage_types.go

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,6 @@
55
package v1alpha5
66

77
import (
8-
"slices"
9-
108
corev1 "k8s.io/api/core/v1"
119
"k8s.io/apimachinery/pkg/api/resource"
1210
)
@@ -342,19 +340,21 @@ type VirtualMachineVolumeStatus struct {
342340
Error string `json:"error,omitempty"`
343341
}
344342

345-
// SortVirtualMachineVolumeStatuses sorts the provided list of
346-
// VirtualMachineVolumeStatus objects.
347-
func SortVirtualMachineVolumeStatuses(s []VirtualMachineVolumeStatus) {
348-
slices.SortFunc(s, func(a, b VirtualMachineVolumeStatus) int {
349-
switch {
350-
case a.DiskUUID < b.DiskUUID:
351-
return -1
352-
case a.DiskUUID > b.DiskUUID:
353-
return 1
354-
default:
355-
return 0
356-
}
357-
})
343+
// GetControllerType returns the type of controller to which the disk is
344+
// attached.
345+
func (v VirtualMachineVolumeStatus) GetControllerType() VirtualControllerType {
346+
return v.ControllerType
347+
}
348+
349+
// GetControllerBusNumber returns the bus number of the controller to which the
350+
// disk is connected.
351+
func (v VirtualMachineVolumeStatus) GetControllerBusNumber() *int32 {
352+
return v.ControllerBusNumber
353+
}
354+
355+
// GetUnitNumber returns the disk's unit number.
356+
func (v VirtualMachineVolumeStatus) GetUnitNumber() *int32 {
357+
return v.UnitNumber
358358
}
359359

360360
// VirtualMachineStorageStatus defines the observed state of a VirtualMachine's

controllers/virtualmachine/volume/volume_controller.go

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ import (
4545
"github.com/vmware-tanzu/vm-operator/pkg/record"
4646
pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util"
4747
vmopv1util "github.com/vmware-tanzu/vm-operator/pkg/util/vmopv1"
48+
pkgvol "github.com/vmware-tanzu/vm-operator/pkg/util/volumes"
4849
)
4950

5051
// AddToManager adds this package's controller to the provided manager.
@@ -786,9 +787,10 @@ func (r *Reconciler) processAttachments(
786787
// Update the existing status with the new list of managed volumes.
787788
ctx.VM.Status.Volumes = append(ctx.VM.Status.Volumes, volumeStatuses...)
788789

789-
// This is how the previous code sorted, but IMO keeping in Spec order makes
790-
// more sense.
791-
vmopv1.SortVirtualMachineVolumeStatuses(ctx.VM.Status.Volumes)
790+
pkgvol.SortVirtualMachineVolumeStatuses(
791+
ctx.VM.Status.Volumes,
792+
true, // Sort by UUID.
793+
)
792794

793795
return apierrorsutil.NewAggregate(createErrs)
794796
}

controllers/virtualmachine/volumebatch/volumebatch_controller.go

Lines changed: 27 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ import (
4141
"github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/constants"
4242
"github.com/vmware-tanzu/vm-operator/pkg/record"
4343
pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util"
44+
pkgvol "github.com/vmware-tanzu/vm-operator/pkg/util/volumes"
4445
)
4546

4647
const (
@@ -659,16 +660,10 @@ func (r *Reconciler) getVMVolStatusesFromBatchAttachment(
659660
vol.PersistentVolumeClaim.ClaimName == volStatus.PersistentVolumeClaim.ClaimName {
660661

661662
vmVolStatus = attachmentStatusToVolumeStatus(volStatus.Name, volStatus)
662-
existingVol := existingVMManagedVolStatus[vol.Name]
663-
vmVolStatus.Used = existingVol.Used
664-
vmVolStatus.Crypto = existingVol.Crypto
665-
vmVolStatus.ControllerType = existingVol.ControllerType
666-
vmVolStatus.ControllerBusNumber = existingVol.ControllerBusNumber
667-
vmVolStatus.UnitNumber = existingVol.UnitNumber
668-
vmVolStatus.DiskMode = existingVol.DiskMode
669-
vmVolStatus.SharingMode = existingVol.SharingMode
670-
671-
// Add PVC capacity information
663+
664+
updateVolumeStatusWithExistingVMStatus(&vmVolStatus, existingVMManagedVolStatus)
665+
666+
// Add PVC capacity information.
672667
if err := r.updateVolumeStatusWithPVCInfo(
673668
ctx,
674669
volStatus.PersistentVolumeClaim.ClaimName,
@@ -1029,14 +1024,8 @@ func (r *Reconciler) getVMVolStatusesFromLegacyAttachments(
10291024
// vm.status.vol and legacyAttachment.
10301025
if vol.PersistentVolumeClaim.ClaimName == att.Spec.VolumeName {
10311026
vmVolStatus := legacyAttachmentToVolumeStatus(vol.Name, att)
1032-
existingVol := existingVMManagedVolStatus[vol.Name]
1033-
vmVolStatus.Used = existingVol.Used
1034-
vmVolStatus.Crypto = existingVol.Crypto
1035-
vmVolStatus.ControllerType = existingVol.ControllerType
1036-
vmVolStatus.ControllerBusNumber = existingVol.ControllerBusNumber
1037-
vmVolStatus.UnitNumber = existingVol.UnitNumber
1038-
vmVolStatus.DiskMode = existingVol.DiskMode
1039-
vmVolStatus.SharingMode = existingVol.SharingMode
1027+
1028+
updateVolumeStatusWithExistingVMStatus(&vmVolStatus, existingVMManagedVolStatus)
10401029

10411030
// Add PVC capacity information
10421031
if err := r.updateVolumeStatusWithPVCInfo(
@@ -1096,7 +1085,10 @@ func updateVMVolumeStatus(
10961085
ctx.VM.Status.Volumes = append(ctx.VM.Status.Volumes, v2...)
10971086

10981087
// Sort the volume statuses to ensure consistent ordering.
1099-
vmopv1.SortVirtualMachineVolumeStatuses(ctx.VM.Status.Volumes)
1088+
pkgvol.SortVirtualMachineVolumeStatuses(
1089+
ctx.VM.Status.Volumes,
1090+
false, // Sort by TargetID.
1091+
)
11001092
}
11011093

11021094
// categorizeVolumeSpecs categorizes the volume specs into two categories:
@@ -1147,3 +1139,19 @@ func categorizeVolumeSpecs(
11471139
}
11481140
return volumeSpecsForBatch, volumeSpecsForLegacy
11491141
}
1142+
1143+
// Update the target vmVolStatus with info from existing VM vol status.
1144+
func updateVolumeStatusWithExistingVMStatus(
1145+
vmVolStatus *vmopv1.VirtualMachineVolumeStatus,
1146+
existingVMManagedVolStatusMap map[string]vmopv1.VirtualMachineVolumeStatus) {
1147+
1148+
existingVolStatus := existingVMManagedVolStatusMap[vmVolStatus.Name]
1149+
1150+
vmVolStatus.Used = existingVolStatus.Used
1151+
vmVolStatus.Crypto = existingVolStatus.Crypto
1152+
vmVolStatus.ControllerType = existingVolStatus.ControllerType
1153+
vmVolStatus.ControllerBusNumber = existingVolStatus.ControllerBusNumber
1154+
vmVolStatus.UnitNumber = existingVolStatus.UnitNumber
1155+
vmVolStatus.DiskMode = existingVolStatus.DiskMode
1156+
vmVolStatus.SharingMode = existingVolStatus.SharingMode
1157+
}

controllers/virtualmachine/volumebatch/volumebatch_controller_unit_test.go

Lines changed: 56 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -941,32 +941,38 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
941941
By("VM Status.Volumes are stable-sorted by Spec.Volumes order", func() {
942942
Expect(vm.Status.Volumes).To(HaveLen(2))
943943
Expect(attachment.Status.VolumeStatus).To(HaveLen(2))
944-
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 0, false)
945-
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 1, false)
944+
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 0, false)
945+
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 1, false)
946946
})
947947
})
948948

949949
When("Collecting limit and usage information", func() {
950950

951951
classicDisk1 := func() vmopv1.VirtualMachineVolumeStatus {
952952
return vmopv1.VirtualMachineVolumeStatus{
953-
Name: "my-disk-0",
954-
Type: vmopv1.VolumeTypeClassic,
955-
Limit: ptr.To(resource.MustParse("10Gi")),
956-
Used: ptr.To(resource.MustParse("91Gi")),
957-
Attached: true,
958-
DiskUUID: "100",
953+
Name: "my-disk-0",
954+
Type: vmopv1.VolumeTypeClassic,
955+
Limit: ptr.To(resource.MustParse("10Gi")),
956+
Used: ptr.To(resource.MustParse("91Gi")),
957+
Attached: true,
958+
DiskUUID: "100",
959+
ControllerBusNumber: ptr.To(int32(1)),
960+
ControllerType: vmopv1.VirtualControllerTypeSCSI,
961+
UnitNumber: ptr.To(int32(0)),
959962
}
960963
}
961964

962965
classicDisk2 := func() vmopv1.VirtualMachineVolumeStatus {
963966
return vmopv1.VirtualMachineVolumeStatus{
964-
Name: "my-disk-1",
965-
Type: vmopv1.VolumeTypeClassic,
966-
Limit: ptr.To(resource.MustParse("15Gi")),
967-
Used: ptr.To(resource.MustParse("5Gi")),
968-
Attached: true,
969-
DiskUUID: "101",
967+
Name: "my-disk-1",
968+
Type: vmopv1.VolumeTypeClassic,
969+
Limit: ptr.To(resource.MustParse("15Gi")),
970+
Used: ptr.To(resource.MustParse("5Gi")),
971+
Attached: true,
972+
DiskUUID: "101",
973+
ControllerBusNumber: ptr.To(int32(1)),
974+
ControllerType: vmopv1.VirtualControllerTypeSCSI,
975+
UnitNumber: ptr.To(int32(1)),
970976
}
971977
}
972978

@@ -990,15 +996,17 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
990996
attachment := getCNSBatchAttachmentForVolumeName(ctx, vm)
991997
Expect(attachment).ToNot(BeNil())
992998

993-
By("VM Status.Volumes are sorted by DiskUUID", func() {
999+
By("VM Status.Volumes are stable sorted by Target ID", func() {
9941000
Expect(vm.Status.Volumes).To(HaveLen(4))
9951001
Expect(attachment.Status.VolumeStatus).To(HaveLen(2))
9961002

997-
Expect(vm.Status.Volumes[0]).To(Equal(classicDisk1()))
998-
Expect(vm.Status.Volumes[1]).To(Equal(classicDisk2()))
1003+
// Classic disks have target ID.
1004+
Expect(vm.Status.Volumes[2]).To(Equal(classicDisk1()))
1005+
Expect(vm.Status.Volumes[3]).To(Equal(classicDisk2()))
9991006

1000-
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 2, 1, false)
1001-
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 3, 0, false)
1007+
// Empty target ID so ranked before Classic disks.
1008+
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 0, false)
1009+
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 1, false)
10021010
})
10031011
}
10041012

@@ -1021,7 +1029,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
10211029

10221030
GinkgoHelper()
10231031

1024-
Expect(vm.Status.Volumes[3].Used).To(Equal(ptr.To(resource.MustParse("1Gi"))))
1032+
Expect(vm.Status.Volumes[0].Used).To(Equal(ptr.To(resource.MustParse("1Gi"))))
10251033
}
10261034

10271035
It("includes the PVC usage in the result", func() {
@@ -1052,7 +1060,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
10521060

10531061
GinkgoHelper()
10541062

1055-
Expect(vm.Status.Volumes[3].Limit).To(Equal(ptr.To(resource.MustParse("20Gi"))))
1063+
Expect(vm.Status.Volumes[0].Limit).To(Equal(ptr.To(resource.MustParse("20Gi"))))
10561064
}
10571065

10581066
When("PVC has limit", func() {
@@ -1112,7 +1120,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
11121120

11131121
GinkgoHelper()
11141122

1115-
Expect(vm.Status.Volumes[3].Crypto).To(Equal(newCryptoStatus()))
1123+
Expect(vm.Status.Volumes[0].Crypto).To(Equal(newCryptoStatus()))
11161124
}
11171125

11181126
BeforeEach(func() {
@@ -1130,6 +1138,31 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
11301138
assertPVCHasCrypto()
11311139
})
11321140
})
1141+
1142+
When("Existing status has Controller info", func() {
1143+
BeforeEach(func() {
1144+
vm.Status.Volumes = append(vm.Status.Volumes,
1145+
vmopv1.VirtualMachineVolumeStatus{
1146+
Name: vmVol1.Name,
1147+
Type: vmopv1.VolumeTypeManaged,
1148+
ControllerBusNumber: ptr.To(int32(0)),
1149+
ControllerType: vmopv1.VirtualControllerTypeSCSI,
1150+
UnitNumber: ptr.To(int32(0)),
1151+
},
1152+
vmopv1.VirtualMachineVolumeStatus{
1153+
Name: vmVol2.Name,
1154+
Type: vmopv1.VolumeTypeManaged,
1155+
ControllerBusNumber: ptr.To(int32(0)),
1156+
ControllerType: vmopv1.VirtualControllerTypeSCSI,
1157+
UnitNumber: ptr.To(int32(1)),
1158+
},
1159+
)
1160+
})
1161+
1162+
It("include the Controller info and sort by controller info", func() {
1163+
assertBaselineVolStatus()
1164+
})
1165+
})
11331166
})
11341167
})
11351168
})
@@ -1360,7 +1393,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
13601393
attachment := getCNSBatchAttachmentForVolumeName(ctx, vm)
13611394
Expect(attachment).ToNot(BeNil())
13621395

1363-
By("VM Status.Volumes are stable-sorted by Spec.Volumes order", func() {
1396+
By("VM Status.Volumes are stable-sorted by Spec.Volumes order, and attached should be False", func() {
13641397
Expect(vm.Status.Volumes).To(HaveLen(1))
13651398
Expect(attachment.Status.VolumeStatus).To(HaveLen(1))
13661399
assertVMVolStatusFromBatchAttachmentSpec(vm, attachment, 0, 0)

pkg/providers/vsphere/vmlifecycle/update_status.go

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1438,7 +1438,12 @@ func updateVolumeStatus(vmCtx pkgctx.VirtualMachineContext) {
14381438
})
14391439

14401440
// This sort order is consistent with the logic from the volumes controller.
1441-
vmopv1.SortVirtualMachineVolumeStatuses(vm.Status.Volumes)
1441+
// If VMSharedDisks is enabled, sort by TargetID.
1442+
sortByUUID := !pkgcfg.FromContext(vmCtx).Features.VMSharedDisks
1443+
pkgvol.SortVirtualMachineVolumeStatuses(
1444+
vm.Status.Volumes,
1445+
sortByUUID,
1446+
)
14421447
}
14431448

14441449
type probeResult uint8

0 commit comments

Comments
 (0)