30 changes: 15 additions & 15 deletions api/v1alpha5/virtualmachine_storage_types.go
@@ -5,8 +5,6 @@
package v1alpha5

import (
"slices"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
@@ -342,19 +340,21 @@ type VirtualMachineVolumeStatus struct {
Error string `json:"error,omitempty"`
}

// SortVirtualMachineVolumeStatuses sorts the provided list of
// VirtualMachineVolumeStatus objects.
func SortVirtualMachineVolumeStatuses(s []VirtualMachineVolumeStatus) {
slices.SortFunc(s, func(a, b VirtualMachineVolumeStatus) int {
switch {
case a.DiskUUID < b.DiskUUID:
return -1
case a.DiskUUID > b.DiskUUID:
return 1
default:
return 0
}
})
// GetControllerType returns the type of controller to which the disk is
// attached.
func (v VirtualMachineVolumeStatus) GetControllerType() VirtualControllerType {
return v.ControllerType
}

// GetControllerBusNumber returns the bus number of the controller to which the
// disk is connected.
func (v VirtualMachineVolumeStatus) GetControllerBusNumber() *int32 {
return v.ControllerBusNumber
}

// GetUnitNumber returns the disk's unit number.
func (v VirtualMachineVolumeStatus) GetUnitNumber() *int32 {
return v.UnitNumber
}

// VirtualMachineStorageStatus defines the observed state of a VirtualMachine's
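Note: the replacement helper in pkg/util/volumes is not included in this diff. A minimal sketch of what it might look like, inferred from the two call sites and the test expectations below; the comparator details (nil placement ranking first, the field precedence) and the comparePtr helper are assumptions, not confirmed by this PR:

package volumes

import (
	"cmp"
	"slices"

	vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha5"
)

// SortVirtualMachineVolumeStatuses stable-sorts the provided statuses.
// When sortByUUID is true it keeps the legacy ordering by DiskUUID;
// otherwise it orders by the disk's target: controller type, then bus
// number, then unit number.
func SortVirtualMachineVolumeStatuses(
	s []vmopv1.VirtualMachineVolumeStatus, sortByUUID bool) {

	slices.SortStableFunc(s, func(a, b vmopv1.VirtualMachineVolumeStatus) int {
		if sortByUUID {
			return cmp.Compare(a.DiskUUID, b.DiskUUID)
		}
		if c := cmp.Compare(a.GetControllerType(), b.GetControllerType()); c != 0 {
			return c
		}
		if c := comparePtr(a.GetControllerBusNumber(), b.GetControllerBusNumber()); c != 0 {
			return c
		}
		return comparePtr(a.GetUnitNumber(), b.GetUnitNumber())
	})
}

// comparePtr ranks nil before non-nil, which matches the test expectation
// that managed volumes without placement info sort before classic disks.
func comparePtr(a, b *int32) int {
	switch {
	case a == nil && b == nil:
		return 0
	case a == nil:
		return -1
	case b == nil:
		return 1
	}
	return cmp.Compare(*a, *b)
}

The getter methods added above also hint that the helper could sort through a small interface instead of the concrete status type, letting other status types share the comparator; that, too, is a guess.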
8 changes: 5 additions & 3 deletions controllers/virtualmachine/volume/volume_controller.go
@@ -45,6 +45,7 @@ import (
"github.com/vmware-tanzu/vm-operator/pkg/record"
pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util"
vmopv1util "github.com/vmware-tanzu/vm-operator/pkg/util/vmopv1"
pkgvol "github.com/vmware-tanzu/vm-operator/pkg/util/volumes"
)

// AddToManager adds this package's controller to the provided manager.
@@ -786,9 +787,10 @@ func (r *Reconciler) processAttachments(
// Update the existing status with the new list of managed volumes.
ctx.VM.Status.Volumes = append(ctx.VM.Status.Volumes, volumeStatuses...)

// This is how the previous code sorted, but IMO keeping in Spec order makes
// more sense.
vmopv1.SortVirtualMachineVolumeStatuses(ctx.VM.Status.Volumes)
pkgvol.SortVirtualMachineVolumeStatuses(
ctx.VM.Status.Volumes,
true, // Sort by UUID.
)

return apierrorsutil.NewAggregate(createErrs)
}
46 changes: 27 additions & 19 deletions controllers/virtualmachine/volumebatch/volumebatch_controller.go
@@ -41,6 +41,7 @@ import (
"github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/constants"
"github.com/vmware-tanzu/vm-operator/pkg/record"
pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util"
pkgvol "github.com/vmware-tanzu/vm-operator/pkg/util/volumes"
)

const (
@@ -659,16 +660,10 @@ func (r *Reconciler) getVMVolStatusesFromBatchAttachment(
vol.PersistentVolumeClaim.ClaimName == volStatus.PersistentVolumeClaim.ClaimName {

vmVolStatus = attachmentStatusToVolumeStatus(volStatus.Name, volStatus)
existingVol := existingVMManagedVolStatus[vol.Name]
vmVolStatus.Used = existingVol.Used
vmVolStatus.Crypto = existingVol.Crypto
vmVolStatus.ControllerType = existingVol.ControllerType
vmVolStatus.ControllerBusNumber = existingVol.ControllerBusNumber
vmVolStatus.UnitNumber = existingVol.UnitNumber
vmVolStatus.DiskMode = existingVol.DiskMode
vmVolStatus.SharingMode = existingVol.SharingMode

// Add PVC capacity information

updateVolumeStatusWithExistingVMStatus(&vmVolStatus, existingVMManagedVolStatus)

// Add PVC capacity information.
if err := r.updateVolumeStatusWithPVCInfo(
ctx,
volStatus.PersistentVolumeClaim.ClaimName,
@@ -1029,14 +1024,8 @@ func (r *Reconciler) getVMVolStatusesFromLegacyAttachments(
// vm.status.vol and legacyAttachment.
if vol.PersistentVolumeClaim.ClaimName == att.Spec.VolumeName {
vmVolStatus := legacyAttachmentToVolumeStatus(vol.Name, att)
existingVol := existingVMManagedVolStatus[vol.Name]
vmVolStatus.Used = existingVol.Used
vmVolStatus.Crypto = existingVol.Crypto
vmVolStatus.ControllerType = existingVol.ControllerType
vmVolStatus.ControllerBusNumber = existingVol.ControllerBusNumber
vmVolStatus.UnitNumber = existingVol.UnitNumber
vmVolStatus.DiskMode = existingVol.DiskMode
vmVolStatus.SharingMode = existingVol.SharingMode

updateVolumeStatusWithExistingVMStatus(&vmVolStatus, existingVMManagedVolStatus)

// Add PVC capacity information
if err := r.updateVolumeStatusWithPVCInfo(
@@ -1096,7 +1085,10 @@ func updateVMVolumeStatus(
ctx.VM.Status.Volumes = append(ctx.VM.Status.Volumes, v2...)

// Sort the volume statuses to ensure consistent ordering.
vmopv1.SortVirtualMachineVolumeStatuses(ctx.VM.Status.Volumes)
pkgvol.SortVirtualMachineVolumeStatuses(
ctx.VM.Status.Volumes,
false, // Sort by TargetID.
)
}

// categorizeVolumeSpecs categorizes the volume specs into two categories:
@@ -1147,3 +1139,19 @@ func categorizeVolumeSpecs(
}
return volumeSpecsForBatch, volumeSpecsForLegacy
}

// updateVolumeStatusWithExistingVMStatus updates the target vmVolStatus with
// info from the existing VM volume status.
func updateVolumeStatusWithExistingVMStatus(
vmVolStatus *vmopv1.VirtualMachineVolumeStatus,
existingVMManagedVolStatusMap map[string]vmopv1.VirtualMachineVolumeStatus) {

existingVolStatus := existingVMManagedVolStatusMap[vmVolStatus.Name]

vmVolStatus.Used = existingVolStatus.Used
vmVolStatus.Crypto = existingVolStatus.Crypto
vmVolStatus.ControllerType = existingVolStatus.ControllerType
vmVolStatus.ControllerBusNumber = existingVolStatus.ControllerBusNumber
vmVolStatus.UnitNumber = existingVolStatus.UnitNumber
vmVolStatus.DiskMode = existingVolStatus.DiskMode
vmVolStatus.SharingMode = existingVolStatus.SharingMode
}
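A Go subtlety worth a comment here: indexing the map with a volume name that has no existing entry returns the zero value, so all seven fields are reset (nil pointers, empty strings) rather than left as they were. A minimal illustration with hypothetical values, assuming the vmopv1 and k8s.io/utils/ptr imports already used in this package:

existing := map[string]vmopv1.VirtualMachineVolumeStatus{
	"disk-a": {Name: "disk-a", UnitNumber: ptr.To(int32(3))},
}

status := vmopv1.VirtualMachineVolumeStatus{
	Name:       "disk-b",
	UnitNumber: ptr.To(int32(7)),
}
updateVolumeStatusWithExistingVMStatus(&status, existing)
// status.UnitNumber is now nil: "disk-b" is absent from the map, so the
// lookup yields a zero-value status and the stale unit number is cleared
// rather than preserved.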
@@ -941,32 +941,38 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
By("VM Status.Volumes are stable-sorted by Spec.Volumes order", func() {
Expect(vm.Status.Volumes).To(HaveLen(2))
Expect(attachment.Status.VolumeStatus).To(HaveLen(2))
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 0, false)
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 1, false)
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 0, false)
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 1, false)
})
})

When("Collecting limit and usage information", func() {

classicDisk1 := func() vmopv1.VirtualMachineVolumeStatus {
return vmopv1.VirtualMachineVolumeStatus{
Name: "my-disk-0",
Type: vmopv1.VolumeTypeClassic,
Limit: ptr.To(resource.MustParse("10Gi")),
Used: ptr.To(resource.MustParse("91Gi")),
Attached: true,
DiskUUID: "100",
Name: "my-disk-0",
Type: vmopv1.VolumeTypeClassic,
Limit: ptr.To(resource.MustParse("10Gi")),
Used: ptr.To(resource.MustParse("91Gi")),
Attached: true,
DiskUUID: "100",
ControllerBusNumber: ptr.To(int32(1)),
ControllerType: vmopv1.VirtualControllerTypeSCSI,
UnitNumber: ptr.To(int32(0)),
}
}

classicDisk2 := func() vmopv1.VirtualMachineVolumeStatus {
return vmopv1.VirtualMachineVolumeStatus{
Name: "my-disk-1",
Type: vmopv1.VolumeTypeClassic,
Limit: ptr.To(resource.MustParse("15Gi")),
Used: ptr.To(resource.MustParse("5Gi")),
Attached: true,
DiskUUID: "101",
Name: "my-disk-1",
Type: vmopv1.VolumeTypeClassic,
Limit: ptr.To(resource.MustParse("15Gi")),
Used: ptr.To(resource.MustParse("5Gi")),
Attached: true,
DiskUUID: "101",
ControllerBusNumber: ptr.To(int32(1)),
ControllerType: vmopv1.VirtualControllerTypeSCSI,
UnitNumber: ptr.To(int32(1)),
}
}

@@ -990,15 +996,17 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
attachment := getCNSBatchAttachmentForVolumeName(ctx, vm)
Expect(attachment).ToNot(BeNil())

By("VM Status.Volumes are sorted by DiskUUID", func() {
By("VM Status.Volumes are stable sorted by Target ID", func() {
Expect(vm.Status.Volumes).To(HaveLen(4))
Expect(attachment.Status.VolumeStatus).To(HaveLen(2))

Expect(vm.Status.Volumes[0]).To(Equal(classicDisk1()))
Expect(vm.Status.Volumes[1]).To(Equal(classicDisk2()))
// Classic disks have a target ID.
Expect(vm.Status.Volumes[2]).To(Equal(classicDisk1()))
Expect(vm.Status.Volumes[3]).To(Equal(classicDisk2()))

assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 2, 1, false)
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 3, 0, false)
// Empty target ID, so these rank before the classic disks.
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 0, 0, false)
assertVMVolStatusFromBatchAttachmentStatus(vm, attachment, 1, 1, false)
})
}

@@ -1021,7 +1029,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type

GinkgoHelper()

Expect(vm.Status.Volumes[3].Used).To(Equal(ptr.To(resource.MustParse("1Gi"))))
Expect(vm.Status.Volumes[0].Used).To(Equal(ptr.To(resource.MustParse("1Gi"))))
}

It("includes the PVC usage in the result", func() {
@@ -1052,7 +1060,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type

GinkgoHelper()

Expect(vm.Status.Volumes[3].Limit).To(Equal(ptr.To(resource.MustParse("20Gi"))))
Expect(vm.Status.Volumes[0].Limit).To(Equal(ptr.To(resource.MustParse("20Gi"))))
}

When("PVC has limit", func() {
@@ -1112,7 +1120,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type

GinkgoHelper()

Expect(vm.Status.Volumes[3].Crypto).To(Equal(newCryptoStatus()))
Expect(vm.Status.Volumes[0].Crypto).To(Equal(newCryptoStatus()))
}

BeforeEach(func() {
@@ -1130,6 +1138,31 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
assertPVCHasCrypto()
})
})

When("Existing status has Controller info", func() {
BeforeEach(func() {
vm.Status.Volumes = append(vm.Status.Volumes,
vmopv1.VirtualMachineVolumeStatus{
Name: vmVol1.Name,
Type: vmopv1.VolumeTypeManaged,
ControllerBusNumber: ptr.To(int32(0)),
ControllerType: vmopv1.VirtualControllerTypeSCSI,
UnitNumber: ptr.To(int32(0)),
},
vmopv1.VirtualMachineVolumeStatus{
Name: vmVol2.Name,
Type: vmopv1.VolumeTypeManaged,
ControllerBusNumber: ptr.To(int32(0)),
ControllerType: vmopv1.VirtualControllerTypeSCSI,
UnitNumber: ptr.To(int32(1)),
},
)
})

It("include the Controller info and sort by controller info", func() {
assertBaselineVolStatus()
})
})
})
})
})
Expand Down Expand Up @@ -1360,7 +1393,7 @@ FaultMessage: ([]vimtypes.LocalizableMessage) \u003cnil\u003e\\n }\\n },\\n Type
attachment := getCNSBatchAttachmentForVolumeName(ctx, vm)
Expect(attachment).ToNot(BeNil())

By("VM Status.Volumes are stable-sorted by Spec.Volumes order", func() {
By("VM Status.Volumes are stable-sorted by Spec.Volumes order, and attached should be False", func() {
Expect(vm.Status.Volumes).To(HaveLen(1))
Expect(attachment.Status.VolumeStatus).To(HaveLen(1))
assertVMVolStatusFromBatchAttachmentSpec(vm, attachment, 0, 0)
7 changes: 6 additions & 1 deletion pkg/providers/vsphere/vmlifecycle/update_status.go
@@ -1438,7 +1438,12 @@ func updateVolumeStatus(vmCtx pkgctx.VirtualMachineContext) {
})

// This sort order is consistent with the logic from the volumes controller.
vmopv1.SortVirtualMachineVolumeStatuses(vm.Status.Volumes)
// If VMSharedDisks is enabled, sort by TargetID.
sortByUUID := !pkgcfg.FromContext(vmCtx).Features.VMSharedDisks
pkgvol.SortVirtualMachineVolumeStatuses(
vm.Status.Volumes,
sortByUUID,
)
}
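To make the flag's effect concrete, a hypothetical two-volume example; the ordering shown assumes the comparator behavior sketched earlier and is illustrative only:

vols := []vmopv1.VirtualMachineVolumeStatus{
	{
		Name:                "classic",
		DiskUUID:            "101",
		ControllerType:      vmopv1.VirtualControllerTypeSCSI,
		ControllerBusNumber: ptr.To(int32(1)),
		UnitNumber:          ptr.To(int32(0)),
	},
	{Name: "managed", DiskUUID: "200"}, // no placement info yet
}

pkgvol.SortVirtualMachineVolumeStatuses(vols, true)
// VMSharedDisks disabled, sort by UUID: "classic" ("101") before "managed" ("200").

pkgvol.SortVirtualMachineVolumeStatuses(vols, false)
// VMSharedDisks enabled, sort by target: "managed" has no controller
// placement, so it ranks first, ahead of "classic".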

type probeResult uint8