@@ -74,6 +74,13 @@ func (c *Constructor) Setup() util.Processors {
},
}
}
if oldFirmware := vmBuilder.VirtualMachine.Spec.Template.Spec.Domain.Firmware; oldFirmware != nil {
if firmware == nil {
firmware = &kubevirtv1.Firmware{}
}
firmware.UUID = oldFirmware.UUID
firmware.Serial = oldFirmware.Serial
}
Comment on lines +77 to +83
Member:
could you please explain a bit more about this? Is this related to cdrom hotplug?

@wheatdog (Member Author), Feb 13, 2026:

This change is important for hotplug actions (not just cdrom hotplug but also the existing volume hotplug). Without it, the updated VM gets a different firmware serial and UUID; since those are not live-updatable fields, KubeVirt sets the RestartRequired condition.

Take this example, using a binary built from the master branch (volume hotplug):

❯ cat main.tf
resource "harvester_volume" "mount-disk" {
  name      = "mount-disk"
  namespace = "default"

  size = "1Gi"
}

resource "harvester_virtualmachine" "fedora" {
  name                 = "fedora"
  namespace            = "default"

  cpu    = 1
  memory = "2Gi"

  efi         = true
  secure_boot = true

  hostname        = "fedora"

  network_interface {
    name           = "nic-1"
    wait_for_lease = true
  }

  disk {
    name       = "rootdisk"
    type       = "disk"
    bus        = "virtio"
    boot_order = 1

    container_image_name = "kubevirt/fedora-cloud-container-disk-demo:v0.35.0"
  }

  # uncomment for the second terraform apply to do volume hotplug 
  # disk {
  #   name = "mount-disk"
  #   type = "disk"
  #   bus  = "scsi"
  #
  #   existing_volume_name = harvester_volume.mount-disk.name
  #   auto_delete          = false
  #   hot_plug             = true
  # }

}

❯ terraform apply

❯ kubectl get vm fedora -o yaml > /tmp/first.yaml

# edit main.tf to uncomment the disk block for the second terraform apply (volume hotplug)

❯ terraform apply

❯ kubectl get vm fedora -o yaml > /tmp/second.yaml

❯ diff /tmp/first.yaml /tmp/second.yaml
6a7
>     harvesterhci.io/volumeClaimTemplates: '[]'
13c14
<   generation: 1
---
>   generation: 3
19c20
<   resourceVersion: "1157979"
---
>   resourceVersion: "1161276"
26c27
<         harvesterhci.io/sshNames: '[]'
---
>         harvesterhci.io/sshNames: "null"
55a57,59
>           - disk:
>               bus: scsi
>             name: mount-disk
69,70c73
<           serial: 78ff9029-d200-4ed7-972f-8222e81d3de4
<           uuid: 8c491cef-9ab6-4644-abe8-22876906a927
---
>           uuid: 98f07cdd-96da-5880-b6c7-1a5700b73dc4
92a96,99
>       - name: mount-disk
>         persistentVolumeClaim:
>           claimName: mount-disk
>           hotpluggable: true
106a114,118
>   - lastProbeTime: null
>     lastTransitionTime: "2026-02-13T00:42:15Z"
>     message: a non-live-updatable field was changed in the template spec
>     status: "True"
>     type: RestartRequired
108c120
<   desiredGeneration: 1
---
>   desiredGeneration: 3
116a129,130
>   - enabled: true
>     name: mount-disk

For cdrom hotplug, check the integration test.

This change fixes it by preserving the old firmware's UUID and serial.

Member:
Thanks for the explanation!

vmBuilder.VirtualMachine.Spec.Template.Spec.Domain.Firmware = firmware
return nil
},
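The pattern in isolation, for readers skimming the diff — a minimal sketch assuming only the kubevirtv1.Firmware type (preserveFirmwareIdentity is a hypothetical helper for illustration, not code from this PR):

package main

import (
	"fmt"

	kubevirtv1 "kubevirt.io/api/core/v1"
)

// preserveFirmwareIdentity copies the identity fields from the old firmware
// spec onto the updated one, so rebuilding the VM spec does not regenerate
// them and trip KubeVirt's RestartRequired condition.
func preserveFirmwareIdentity(old, updated *kubevirtv1.Firmware) *kubevirtv1.Firmware {
	if old == nil {
		return updated // nothing to preserve
	}
	if updated == nil {
		updated = &kubevirtv1.Firmware{}
	}
	updated.UUID = old.UUID
	updated.Serial = old.Serial
	return updated
}

func main() {
	// UUID and serial taken from the kubectl diff shown above.
	old := &kubevirtv1.Firmware{
		UUID:   "8c491cef-9ab6-4644-abe8-22876906a927",
		Serial: "78ff9029-d200-4ed7-972f-8222e81d3de4",
	}
	fmt.Printf("%+v\n", preserveFirmwareIdentity(old, nil))
}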
@@ -216,6 +223,8 @@ func (c *Constructor) Setup() util.Processors {
vmBuilder.ExistingPVCVolume(diskName, existingVolumeName, hotPlug)
} else if containerImageName != "" {
vmBuilder.ContainerDiskVolume(diskName, containerImageName, builder.DefaultImagePullPolicy)
} else if isCDRom && imageNamespacedName == "" {
// Empty CDRom: don't prepare volume
} else {
pvcOption := &builder.PersistentVolumeClaimOption{
VolumeMode: corev1.PersistentVolumeBlock,
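For reference, what the empty-CDRom branch leaves behind — a minimal sketch against the kubevirtv1 API (emptyCDRom is a hypothetical helper, not the provider's builder code, and it assumes a KubeVirt version that accepts a CD-ROM disk with no backing volume): the disk device is added to the domain spec, but no entry is created under spec.template.spec.volumes.

package main

import (
	"fmt"

	kubevirtv1 "kubevirt.io/api/core/v1"
)

// emptyCDRom builds a CD-ROM disk device with no corresponding volume,
// i.e. an empty tray that media can later be inserted into via hotplug.
func emptyCDRom(name string, bus kubevirtv1.DiskBus) kubevirtv1.Disk {
	readOnly := true
	return kubevirtv1.Disk{
		Name: name,
		DiskDevice: kubevirtv1.DiskDevice{
			CDRom: &kubevirtv1.CDRomTarget{Bus: bus, ReadOnly: &readOnly},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", emptyCDRom("cd1", kubevirtv1.DiskBusSATA))
}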
internal/tests/resource_virtualmachine_test.go (213 additions & 0 deletions)
@@ -20,6 +20,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
kubevirtv1 "kubevirt.io/api/core/v1"

@@ -487,6 +488,178 @@ resource "harvester_virtualmachine" "test-acc-labels" {
})
}

func TestAccVirtualMachine_hotplug_cdrom_volume(t *testing.T) {
var (
testAccImageName = "test-acc-hp-cdrom-img"
testAccVirtualMachineName = "test-acc-hp-cdrom"
testAccVirtualMachineNamespace = "default"
testAccImageResourceName = constants.ResourceTypeImage + "." + testAccImageName
testAccVirtualMachineResourceName = constants.ResourceTypeVirtualMachine + "." + testAccVirtualMachineName
vm = &kubevirtv1.VirtualMachine{}
vmiUid types.UID
ctx = context.Background()
)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckVirtualMachineDestroy(ctx),
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(`
resource "harvester_virtualmachine" "%s" {
name = "%s"
namespace = "%s"

cpu = 1
memory = "1Gi"

run_strategy = "RerunOnFailure"
machine_type = "q35"

network_interface {
name = "default"
}

disk {
name = "rootdisk"
type = "disk"
bus = "virtio"
boot_order = 1

container_image_name = "kubevirt/fedora-cloud-container-disk-demo:v0.35.0"
}

disk {
name = "cd1"
type = "cd-rom"
bus = "sata"
boot_order = 2
}
}
`,
testAccVirtualMachineName, testAccVirtualMachineName, testAccVirtualMachineNamespace,
),
Check: resource.ComposeAggregateTestCheckFunc(
testAccVirtualMachineExists(ctx, testAccVirtualMachineResourceName, vm),
testAccCheckCdRomSpec(ctx, testAccVirtualMachineNamespace, testAccVirtualMachineName, 2, 1),
func(s *terraform.State) error {
c, err := testAccProvider.Meta().(*config.Config).K8sClient()
if err != nil {
return err
}
vmi, err := c.HarvesterClient.KubevirtV1().VirtualMachineInstances(testAccVirtualMachineNamespace).Get(ctx, testAccVirtualMachineName, metav1.GetOptions{})
if err != nil {
return err
}
vmiUid = vmi.UID
return nil
},
),
},
{
Config: fmt.Sprintf(`
resource harvester_image "%s" {
name = "%s"
namespace = "%s"
display_name = "%s"
source_type = "download"
url = "https://distro.ibiblio.org/tinycorelinux/16.x/x86/release/TinyCore-current.iso"
storage_class_name = "harvester-longhorn"
}

resource "harvester_virtualmachine" "%s" {
name = "%s"
namespace = "%s"

cpu = 1
memory = "1Gi"

run_strategy = "RerunOnFailure"
machine_type = "q35"

network_interface {
name = "default"
}

disk {
name = "rootdisk"
type = "disk"
bus = "virtio"
boot_order = 1

container_image_name = "kubevirt/fedora-cloud-container-disk-demo:v0.35.0"
}

disk {
name = "cd1"
type = "cd-rom"
bus = "sata"
boot_order = 2

size = "1Gi"
hot_plug = true
image = "%s/%s"
auto_delete = true
}
}
`,
testAccImageName, testAccImageName, testAccVirtualMachineNamespace, testAccImageName,
testAccVirtualMachineName, testAccVirtualMachineName, testAccVirtualMachineNamespace,
testAccVirtualMachineNamespace, testAccImageName,
),
Check: resource.ComposeAggregateTestCheckFunc(
resource.TestCheckResourceAttr(testAccImageResourceName, constants.FieldCommonName, "test-acc-hp-cdrom-img"),
resource.TestCheckResourceAttr(testAccImageResourceName, constants.FieldCommonNamespace, "default"),
testAccVirtualMachineExists(ctx, testAccVirtualMachineResourceName, vm),
testAccCheckCdRomSpec(ctx, testAccVirtualMachineNamespace, testAccVirtualMachineName, 2, 2),
testAccCheckVmiUid(ctx, testAccVirtualMachineNamespace, testAccVirtualMachineName, &vmiUid),
),
},
{
Config: fmt.Sprintf(`
resource "harvester_virtualmachine" "%s" {
name = "%s"
namespace = "%s"

cpu = 1
memory = "1Gi"

run_strategy = "RerunOnFailure"
machine_type = "q35"

network_interface {
name = "default"
}

disk {
name = "rootdisk"
type = "disk"
bus = "virtio"
boot_order = 1

container_image_name = "kubevirt/fedora-cloud-container-disk-demo:v0.35.0"
}

disk {
name = "cd1"
type = "cd-rom"
bus = "sata"
boot_order = 2
}
}
`,
testAccVirtualMachineName, testAccVirtualMachineName, testAccVirtualMachineNamespace,
),
Check: resource.ComposeAggregateTestCheckFunc(
testAccVirtualMachineExists(ctx, testAccVirtualMachineResourceName, vm),
testAccCheckCdRomSpec(ctx, testAccVirtualMachineNamespace, testAccVirtualMachineName, 2, 1),
testAccCheckVmiUid(ctx, testAccVirtualMachineNamespace, testAccVirtualMachineName, &vmiUid),
),
},
},
})
}
@wheatdog (Member Author):
Although we got the "Complex Method" notice from CodeFactor, it's intentional to have TestAccVirtualMachine_hotplug_cdrom_volume perform

  1. create a VM with an empty cdrom
  2. create a VM image and insert it into the VM
  3. eject the media from the VM

Therefore, I think we can ignore this warning from CodeFactor.


func testAccVirtualMachineExists(ctx context.Context, n string, vm *kubevirtv1.VirtualMachine) resource.TestCheckFunc {
return func(s *terraform.State) error {
foundVM, err := testAccGetVirtualMachine(ctx, s, n)
@@ -498,6 +671,46 @@ func testAccVirtualMachineExists(ctx context.Context, n string, vm *kubevirtv1.V
}
}

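// testAccCheckCdRomSpec fetches the VM and compares the number of disk
// devices and volumes against the expected counts; an empty CD-ROM adds a
// disk device but no volume, which is why the two counts can differ.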
func testAccCheckCdRomSpec(ctx context.Context, vmNamespace, vmName string, expectedDisksCnt, expectedVolumeCnts int) resource.TestCheckFunc {
return func(s *terraform.State) error {
c, err := testAccProvider.Meta().(*config.Config).K8sClient()
if err != nil {
return err
}
vm, err := c.HarvesterClient.KubevirtV1().VirtualMachines(vmNamespace).Get(ctx, vmName, metav1.GetOptions{})
if err != nil {
return err
}
disksCnt := len(vm.Spec.Template.Spec.Domain.Devices.Disks)
if disksCnt != expectedDisksCnt {
return fmt.Errorf("Should have %d disk devices but got %d", expectedDisksCnt, disksCnt)
}
volumeCnts := len(vm.Spec.Template.Spec.Volumes)
if volumeCnts != expectedVolumeCnts {
return fmt.Errorf("Should have %d volumes but got %d", expectedVolumeCnts, volumeCnts)
}

return nil
}
}

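// testAccCheckVmiUid asserts that the VirtualMachineInstance kept the same
// UID across test steps; KubeVirt recreates the VMI (with a new UID) on
// restart, so a stable UID shows the hotplug did not force a reboot.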
func testAccCheckVmiUid(ctx context.Context, vmNamespace, vmName string, vmiUid *types.UID) resource.TestCheckFunc {
return func(s *terraform.State) error {
c, err := testAccProvider.Meta().(*config.Config).K8sClient()
if err != nil {
return err
}
vmi, err := c.HarvesterClient.KubevirtV1().VirtualMachineInstances(vmNamespace).Get(ctx, vmName, metav1.GetOptions{})
if err != nil {
return err
}
if vmi.UID != *vmiUid {
return fmt.Errorf("Shouldn't trigger VMI re-creation. Expected: %s, Got: %s", *vmiUid, vmi.UID)
}
return nil
}
}

func testAccVirtualMachineLabels(ctx context.Context, n string, labels map[string]string) resource.TestCheckFunc {
return func(s *terraform.State) error {
vm, err := testAccGetVirtualMachine(ctx, s, n)
pkg/importer/resource_virtualmachine_importer.go (36 additions & 32 deletions)
@@ -267,47 +267,51 @@ func (v *VMImporter) Volume() ([]map[string]interface{}, []map[string]interface{
 	var (
 		disks          = v.VirtualMachine.Spec.Template.Spec.Domain.Devices.Disks
 		volumes        = v.VirtualMachine.Spec.Template.Spec.Volumes
+		volumesMap     = make(map[string]kubevirtv1.Volume, len(volumes))
 		cloudInitState = make([]map[string]interface{}, 0, 1)
 		diskStates     = make([]map[string]interface{}, 0, len(disks))
 	)
 
 	for _, volume := range volumes {
+		volumesMap[volume.Name] = volume
+	}
+
+	for _, disk := range disks {
 		diskState := make(map[string]interface{})
-		for _, disk := range disks {
-			if volume.Name != disk.Name {
-				continue
-			}
Comment on lines -276 to -278
@brandboat (Member), Feb 11, 2026:
So this fixed the missing hotplug disk issue, right? The original behavior iterated over volumes and skipped a disk when its name didn't match the volume name. That could be a problem, since a hotplugged empty cd-rom is not defined in the volumes section.

@wheatdog (Member Author):
Exactly! This change is for empty cdrom.

-			var (
-				diskType string
-				diskBus  string
-			)
-			if disk.CDRom != nil {
-				diskType = builder.DiskTypeCDRom
-				diskBus = string(disk.CDRom.Bus)
-			} else if disk.Disk != nil {
-				diskType = builder.DiskTypeDisk
-				diskBus = string(disk.Disk.Bus)
-			} else {
-				return nil, nil, fmt.Errorf("unsupported volume type found on volume %s. ", disk.Name)
-			}
-			diskState[constants.FieldDiskName] = disk.Name
-			diskState[constants.FieldDiskBootOrder] = disk.BootOrder
-			diskState[constants.FieldDiskType] = diskType
-			diskState[constants.FieldDiskBus] = diskBus
-		}
-		if volume.CloudInitNoCloud != nil || volume.CloudInitConfigDrive != nil {
-			cloudInitState = v.cloudInit(volume)
-		} else {
-			if volume.PersistentVolumeClaim != nil {
-				if err := v.pvcVolume(volume, diskState); err != nil {
-					return nil, nil, err
-				}
-			} else if volume.ContainerDisk != nil {
-				diskState[constants.FieldDiskContainerImageName] = volume.ContainerDisk.Image
-			} else {
-				return nil, nil, fmt.Errorf("unsupported volume type found on volume %s. ", volume.Name)
-			}
-		}
-		diskStates = append(diskStates, diskState)
-	}
+		var (
+			diskType string
+			diskBus  string
+		)
+		if disk.CDRom != nil {
+			diskType = builder.DiskTypeCDRom
+			diskBus = string(disk.CDRom.Bus)
+		} else if disk.Disk != nil {
+			diskType = builder.DiskTypeDisk
+			diskBus = string(disk.Disk.Bus)
+		} else {
+			return nil, nil, fmt.Errorf("unsupported volume type found on volume %s. ", disk.Name)
+		}
+		diskState[constants.FieldDiskName] = disk.Name
+		diskState[constants.FieldDiskBootOrder] = disk.BootOrder
+		diskState[constants.FieldDiskType] = diskType
+		diskState[constants.FieldDiskBus] = diskBus
+
+		if volume, hasVolume := volumesMap[disk.Name]; hasVolume {
+			if volume.CloudInitNoCloud != nil || volume.CloudInitConfigDrive != nil {
+				cloudInitState = v.cloudInit(volume)
+			} else {
+				if volume.PersistentVolumeClaim != nil {
+					if err := v.pvcVolume(volume, diskState); err != nil {
+						return nil, nil, err
+					}
+				} else if volume.ContainerDisk != nil {
+					diskState[constants.FieldDiskContainerImageName] = volume.ContainerDisk.Image
+				} else {
+					return nil, nil, fmt.Errorf("unsupported volume type found on volume %s. ", volume.Name)
+				}
+			}
+		}
+		diskStates = append(diskStates, diskState)
+	}
 	return diskStates, cloudInitState, nil
 }