
Commit 806575c

Detach is broken for multi-attached fs-based volumes
Fixed an issue with detaching multi-attached fs-based volumes. Volume drivers using _HostMountStateManager are a special case: _HostMountStateManager ensures that the compute node mounts a mountpoint shared by multiple attachments only once, and that it is not unmounted until it is no longer in use by any attachment. We can therefore skip the multiattach check for volume drivers based on LibvirtMountedFileSystemVolumeDriver.

Closes-Bug: 1888022
Change-Id: Ia91b63c0676f42ad8a7a0d16e6870bafc2ee7675
1 parent 64980bd commit 806575c
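
To summarize the decision logic this commit introduces, here is a minimal, self-contained Python sketch. The names are simplified stand-ins, not the actual nova code; the real change is in the driver.py hunk further below.

```python
# Simplified sketch of the detach decision after this commit. The class below
# is a stand-in for
# nova.virt.libvirt.volume.fs.LibvirtMountedFileSystemVolumeDriver.

class LibvirtMountedFileSystemVolumeDriver:
    """Stand-in for the fs-based volume driver base class."""


def should_disconnect_target(multiattach, vol_driver, connections_on_this_host):
    # Non-multiattach volumes can always have their target disconnected.
    if not multiattach:
        return True
    # fs-based drivers delegate mount/unmount reference counting to
    # _HostMountStateManager, so the per-host attachment count is irrelevant.
    if isinstance(vol_driver, LibvirtMountedFileSystemVolumeDriver):
        return True
    # Otherwise, only disconnect when this host holds a single connection.
    return connections_on_this_host <= 1
```

Without the isinstance early return, multi-attached fs-based volumes took the connection-count path and could skip disconnect_volume entirely, which is the behaviour reported in bug 1888022.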


3 files changed: +64 −31 lines


nova/tests/unit/virt/libvirt/test_driver.py

Lines changed: 15 additions & 0 deletions
@@ -121,6 +121,7 @@
 from nova.virt.libvirt.storage import rbd_utils
 from nova.virt.libvirt import utils as libvirt_utils
 from nova.virt.libvirt import vif as libvirt_vif
+from nova.virt.libvirt.volume import fs as fs_drivers
 from nova.virt.libvirt.volume import volume as volume_drivers

@@ -9149,6 +9150,20 @@ def test_connect_volume_native_luks_workaround(self,
         mock_encryptor.attach_volume.assert_called_once_with(
             self.context, **encryption)

+    def test_should_disconnect_target_multi_attach_filesystem_driver(self):
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        volume_driver = mock.MagicMock(
+            spec=fs_drivers.LibvirtMountedFileSystemVolumeDriver)
+        self.assertTrue(drvr._should_disconnect_target(
+            self.context, None, True, volume_driver, None))
+
+    def test_should_disconnect_target_single_attach_filesystem_driver(self):
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        volume_driver = mock.MagicMock(
+            spec=fs_drivers.LibvirtMountedFileSystemVolumeDriver)
+        self.assertTrue(drvr._should_disconnect_target(
+            self.context, None, False, volume_driver, None))
+
     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
     def test_disconnect_volume_native_luks_workaround(self,
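
A side note on the test technique (an illustration, not part of the commit): mock.MagicMock(spec=SomeClass) produces a mock that passes isinstance() checks against SomeClass, which is why the mocked driver in the tests above satisfies the new isinstance check in _should_disconnect_target. A minimal demonstration with a throwaway class:

```python
from unittest import mock


class FakeFsDriver:
    """Throwaway stand-in class used only for this demonstration."""


# A spec'd MagicMock reports itself as an instance of the spec class,
# so isinstance() checks in the code under test pass.
driver = mock.MagicMock(spec=FakeFsDriver)
assert isinstance(driver, FakeFsDriver)
```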

nova/virt/libvirt/driver.py

Lines changed: 43 additions & 31 deletions
@@ -125,6 +125,7 @@
 from nova.virt.libvirt.storage import rbd_utils
 from nova.virt.libvirt import utils as libvirt_utils
 from nova.virt.libvirt import vif as libvirt_vif
+from nova.virt.libvirt.volume import fs
 from nova.virt.libvirt.volume import mount
 from nova.virt.libvirt.volume import remotefs
 from nova.virt import netutils

@@ -1609,9 +1610,8 @@ def _connect_volume(self, context, connection_info, instance,
                               "volume connection", instance=instance)
                 vol_driver.disconnect_volume(connection_info, instance)

-    def _should_disconnect_target(self, context, connection_info, instance):
-        connection_count = 0
-
+    def _should_disconnect_target(self, context, instance, multiattach,
+                                  vol_driver, volume_id):
         # NOTE(jdg): Multiattach is a special case (not to be confused
         # with shared_targets). With multiattach we may have a single volume
         # attached multiple times to *this* compute node (ie Server-1 and

@@ -1621,41 +1621,53 @@ def _should_disconnect_target(self, context, connection_info, instance):
         # will indiscriminantly delete the connection for all Server and that's
         # no good. So check if it's attached multiple times on this node
         # if it is we skip the call to brick to delete the connection.
-        if connection_info.get('multiattach', False):
-            volume = self._volume_api.get(
-                context,
-                driver_block_device.get_volume_id(connection_info))
-            attachments = volume.get('attachments', {})
-            if len(attachments) > 1:
-                # First we get a list of all Server UUID's associated with
-                # this Host (Compute Node). We're going to use this to
-                # determine if the Volume being detached is also in-use by
-                # another Server on this Host, ie just check to see if more
-                # than one attachment.server_id for this volume is in our
-                # list of Server UUID's for this Host
-                servers_this_host = objects.InstanceList.get_uuids_by_host(
-                    context, instance.host)
-
-                # NOTE(jdg): nova.volume.cinder translates the
-                # volume['attachments'] response into a dict which includes
-                # the Server UUID as the key, so we're using that
-                # here to check against our server_this_host list
-                for server_id, data in attachments.items():
-                    if server_id in servers_this_host:
-                        connection_count += 1
+        if not multiattach:
+            return True
+
+        # NOTE(deiter): Volume drivers using _HostMountStateManager are another
+        # special case. _HostMountStateManager ensures that the compute node
+        # only attempts to mount a single mountpoint in use by multiple
+        # attachments once, and that it is not unmounted until it is no longer
+        # in use by any attachments. So we can skip the multiattach check for
+        # volume drivers that based on LibvirtMountedFileSystemVolumeDriver.
+        if isinstance(vol_driver, fs.LibvirtMountedFileSystemVolumeDriver):
+            return True
+
+        connection_count = 0
+        volume = self._volume_api.get(context, volume_id)
+        attachments = volume.get('attachments', {})
+        if len(attachments) > 1:
+            # First we get a list of all Server UUID's associated with
+            # this Host (Compute Node). We're going to use this to
+            # determine if the Volume being detached is also in-use by
+            # another Server on this Host, ie just check to see if more
+            # than one attachment.server_id for this volume is in our
+            # list of Server UUID's for this Host
+            servers_this_host = objects.InstanceList.get_uuids_by_host(
+                context, instance.host)
+
+            # NOTE(jdg): nova.volume.cinder translates the
+            # volume['attachments'] response into a dict which includes
+            # the Server UUID as the key, so we're using that
+            # here to check against our server_this_host list
+            for server_id, data in attachments.items():
+                if server_id in servers_this_host:
+                    connection_count += 1
         return (False if connection_count > 1 else True)

     def _disconnect_volume(self, context, connection_info, instance,
                            encryption=None):
         self._detach_encryptor(context, connection_info, encryption=encryption)
-        if self._should_disconnect_target(context, connection_info, instance):
-            vol_driver = self._get_volume_driver(connection_info)
+        vol_driver = self._get_volume_driver(connection_info)
+        volume_id = driver_block_device.get_volume_id(connection_info)
+        multiattach = connection_info.get('multiattach', False)
+        if self._should_disconnect_target(
+                context, instance, multiattach, vol_driver, volume_id):
             vol_driver.disconnect_volume(connection_info, instance)
         else:
-            LOG.info("Detected multiple connections on this host for volume: "
-                     "%s, skipping target disconnect.",
-                     driver_block_device.get_volume_id(connection_info),
-                     instance=instance)
+            LOG.info('Detected multiple connections on this host for '
+                     'volume: %(volume)s, skipping target disconnect.',
+                     {'volume': volume_id})

     def _extend_volume(self, connection_info, instance, requested_size):
         vol_driver = self._get_volume_driver(connection_info)
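
For context on the NOTE(deiter) comment: _HostMountStateManager (in nova.virt.libvirt.volume.mount, imported above) reference-counts attachments per mountpoint, so an export is mounted once and unmounted only when the last attachment using it goes away. Here is a rough sketch of that idea, assuming nothing about the real class beyond what the comment states:

```python
import collections


class HostMountStateSketch:
    """Illustrative per-mountpoint reference counting; not nova's actual
    _HostMountStateManager, just the behaviour the comment above relies on."""

    def __init__(self):
        self._attachments = collections.defaultdict(set)

    def mount(self, mountpoint, attachment_id):
        # Only the first attachment actually mounts the export.
        if not self._attachments[mountpoint]:
            print('mount %s' % mountpoint)
        self._attachments[mountpoint].add(attachment_id)

    def umount(self, mountpoint, attachment_id):
        # The export is unmounted only when no attachment uses it any more.
        self._attachments[mountpoint].discard(attachment_id)
        if not self._attachments[mountpoint]:
            print('umount %s' % mountpoint)
```

Because this bookkeeping already keeps the mountpoint alive while any attachment uses it, _should_disconnect_target can safely hand fs-based volumes straight to disconnect_volume even when they are multi-attached.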
Release note (new file)

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    `Bug #1888022 <https://launchpad.net/bugs/1888022>`_:
+    An issue that prevented detach of multi-attached fs-based volumes
+    is resolved.
