Skip to content

Commit b9feb05

Browse files
Gustavo Santos and Elod Illes
authored and committed
Reattach mdevs to guest on resume
When suspending a VM in OpenStack, Nova detaches all the mediated devices from the guest machine, but does not reattach them on the resume operation. This patch makes Nova reattach the mdevs that were detached when the guest was suspended. This behavior is due to libvirt not supporting the hot-unplug of mediated devices at the time the feature was being developed. The limitation has been lifted since then, and now we have to amend the resume function so it will reattach the mediated devices that were detached on suspension. Changes: doc/source/admin/virtual-gpu.rst NOTE(elod.illes): updated the doc to reflect the new state. Closes-bug: #1948705 Signed-off-by: Gustavo Santos <[email protected]> Change-Id: I083929f36d9e78bf7713a87cae6d581e0d946867 (cherry picked from commit 16f7c60) (cherry picked from commit 15c32e8)
1 parent 6c3d5de commit b9feb05

File tree

4 files changed

+108
-7
lines changed

4 files changed

+108
-7
lines changed

doc/source/admin/virtual-gpu.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -306,6 +306,11 @@ Caveats
306306
that will cause the instance to be set back to ACTIVE. The ``suspend`` action
307307
in the ``os-instance-actions`` API will have an *Error* state.
308308

309+
.. versionchanged:: 25.0.0
310+
311+
This has been resolved in the Yoga release and backported to Xena and
312+
Wallaby. See `bug 1948705`_.
313+
309314
* Resizing an instance with a new flavor that has vGPU resources doesn't
310315
allocate those vGPUs to the instance (the instance is created without
311316
vGPU resources). The proposed workaround is to rebuild the instance after
@@ -355,6 +360,7 @@ For nested vGPUs:
355360

356361
.. _bug 1778563: https://bugs.launchpad.net/nova/+bug/1778563
357362
.. _bug 1762688: https://bugs.launchpad.net/nova/+bug/1762688
363+
.. _bug 1948705: https://bugs.launchpad.net/nova/+bug/1948705
358364

359365
.. Links
360366
.. _Intel GVT-g: https://01.org/igvt-g

nova/tests/unit/virt/libvirt/test_driver.py

Lines changed: 59 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16471,9 +16471,15 @@ def test_resume(self):
1647116471
mock.patch.object(guest, 'sync_guest_time'),
1647216472
mock.patch.object(drvr, '_wait_for_running',
1647316473
side_effect=loopingcall.LoopingCallDone()),
16474+
mock.patch.object(drvr,
16475+
'_get_mdevs_from_guest_config',
16476+
return_value='fake_mdevs'),
16477+
mock.patch.object(drvr, '_attach_mediated_devices'),
1647416478
) as (_get_existing_domain_xml, _create_guest_with_network,
1647516479
_attach_pci_devices, get_instance_pci_devs, get_image_metadata,
16476-
mock_sync_time, mock_wait):
16480+
mock_sync_time, mock_wait,
16481+
_get_mdevs_from_guest_config,
16482+
_attach_mediated_devices):
1647716483
get_image_metadata.return_value = {'bar': 234}
1647816484

1647916485
drvr.resume(self.context, instance, network_info,
@@ -16488,6 +16494,9 @@ def test_resume(self):
1648816494
self.assertTrue(mock_sync_time.called)
1648916495
_attach_pci_devices.assert_has_calls([mock.call(guest,
1649016496
'fake_pci_devs')])
16497+
_attach_mediated_devices.assert_has_calls(
16498+
[mock.call(guest, 'fake_mdevs')]
16499+
)
1649116500

1649216501
@mock.patch.object(host.Host, '_get_domain')
1649316502
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@@ -25825,6 +25834,55 @@ def test_detach_mediated_devices_raises_exc(self):
2582525834
self.assertRaises(test.TestingException,
2582625835
self._test_detach_mediated_devices, exc)
2582725836

25837+
@mock.patch.object(libvirt_guest.Guest, 'attach_device')
25838+
def _test_attach_mediated_devices(self, side_effect, attach_device):
25839+
dom_without_vgpu = (
25840+
"""<domain> <devices>
25841+
<disk type='file' device='disk'>
25842+
<driver name='qemu' type='qcow2' cache='none'/>
25843+
<source file='xxx'/>
25844+
<target dev='vda' bus='virtio'/>
25845+
<alias name='virtio-disk0'/>
25846+
<address type='pci' domain='0x0000' bus='0x00'
25847+
slot='0x04' function='0x0'/>
25848+
</disk>
25849+
</devices></domain>""")
25850+
25851+
vgpu_xml = (
25852+
"""<domain> <devices>
25853+
<hostdev mode='subsystem' type='mdev' managed='no'
25854+
model='vfio-pci'>
25855+
<source>
25856+
<address uuid='81db53c6-6659-42a0-a34c-1507fdc72983'/>
25857+
</source>
25858+
<alias name='hostdev0'/>
25859+
<address type='pci' domain='0x0000' bus='0x00' slot='0x05'
25860+
function='0x0'/>
25861+
</hostdev>
25862+
</devices></domain>""")
25863+
25864+
attach_device.side_effect = side_effect
25865+
25866+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
25867+
guest = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_without_vgpu))
25868+
mdevs = drvr._get_mdevs_from_guest_config(vgpu_xml)
25869+
drvr._attach_mediated_devices(guest, mdevs)
25870+
return attach_device
25871+
25872+
def test_attach_mediated_devices(self):
25873+
def fake_attach_device(cfg_obj, **kwargs):
25874+
self.assertIsInstance(cfg_obj,
25875+
vconfig.LibvirtConfigGuestHostdevMDEV)
25876+
25877+
attach_mock = self._test_attach_mediated_devices(fake_attach_device)
25878+
attach_mock.assert_called_once_with(mock.ANY, live=True)
25879+
25880+
def test_attach_mediated_devices_raises_exc(self):
25881+
exc = test.TestingException()
25882+
25883+
self.assertRaises(test.TestingException,
25884+
self._test_attach_mediated_devices, exc)
25885+
2582825886
def test_storage_bus_traits__qemu_kvm(self):
2582925887
"""Test getting storage bus traits per virt type.
2583025888
"""

nova/virt/libvirt/driver.py

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3960,6 +3960,10 @@ def resume(self, context, instance, network_info, block_device_info=None):
39603960
"""resume the specified instance."""
39613961
xml = self._get_existing_domain_xml(instance, network_info,
39623962
block_device_info)
3963+
# NOTE(gsantos): The mediated devices that were removed on suspension
3964+
# are still present in the xml. Let's take their references from it
3965+
# and re-attach them.
3966+
mdevs = self._get_mdevs_from_guest_config(xml)
39633967
# NOTE(efried): The instance should already have a vtpm_secret_uuid
39643968
# registered if appropriate.
39653969
guest = self._create_guest_with_network(
@@ -3969,6 +3973,7 @@ def resume(self, context, instance, network_info, block_device_info=None):
39693973
pci_manager.get_instance_pci_devs(instance))
39703974
self._attach_direct_passthrough_ports(
39713975
context, instance, guest, network_info)
3976+
self._attach_mediated_devices(guest, mdevs)
39723977
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running,
39733978
instance)
39743979
timer.start(interval=0.5).wait()
@@ -7953,12 +7958,6 @@ def _detach_mediated_devices(self, guest):
79537958
guest.detach_device(mdev_cfg, live=True)
79547959
except libvirt.libvirtError as ex:
79557960
error_code = ex.get_error_code()
7956-
# NOTE(sbauza): There is a pending issue with libvirt that
7957-
# doesn't allow to hot-unplug mediated devices. Let's
7958-
# short-circuit the suspend action and set the instance back
7959-
# to ACTIVE.
7960-
# TODO(sbauza): Once libvirt supports this, amend the resume()
7961-
# operation to support reallocating mediated devices.
79627961
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
79637962
reason = _("Suspend is not supported for instances having "
79647963
"attached vGPUs.")
@@ -7967,6 +7966,38 @@ def _detach_mediated_devices(self, guest):
79677966
else:
79687967
raise
79697968

7969+
def _attach_mediated_devices(self, guest, devs):
7970+
for mdev_cfg in devs:
7971+
try:
7972+
guest.attach_device(mdev_cfg, live=True)
7973+
except libvirt.libvirtError as ex:
7974+
error_code = ex.get_error_code()
7975+
if error_code == libvirt.VIR_ERR_DEVICE_MISSING:
7976+
LOG.warning("The mediated device %s was not found and "
7977+
"won't be reattached to %s.", mdev_cfg, guest)
7978+
else:
7979+
raise
7980+
7981+
def _get_mdevs_from_guest_config(self, xml):
7982+
"""Get all libvirt's mediated devices from a guest's config (XML) file.
7983+
We don't have to worry about those devices being used by another guest,
7984+
since they remain allocated for the current guest as long as they are
7985+
present in the XML.
7986+
7987+
:param xml: The XML from the guest we want to get a list of mdevs from.
7988+
7989+
:returns: A list containing the objects that represent the mediated
7990+
devices attached to the guest's config passed as argument.
7991+
"""
7992+
config = vconfig.LibvirtConfigGuest()
7993+
config.parse_str(xml)
7994+
7995+
devs = []
7996+
for dev in config.devices:
7997+
if isinstance(dev, vconfig.LibvirtConfigGuestHostdevMDEV):
7998+
devs.append(dev)
7999+
return devs
8000+
79708001
def _has_numa_support(self):
79718002
# This means that the host can support LibvirtConfigGuestNUMATune
79728003
# and the nodeset field in LibvirtConfigGuestMemoryBackingPage
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
fixes:
3+
- |
4+
Amended the guest resume operation to support mediated devices, as
5+
libvirt's minimum required version (v6.0.0) supports the hot-plug and
6+
hot-unplug of mediated devices, a capability libvirt added in v4.3.0.

0 commit comments

Comments
 (0)