 from nova.virt.libvirt.storage import rbd_utils
 from nova.virt.libvirt import utils as libvirt_utils
 from nova.virt.libvirt import vif as libvirt_vif
+from nova.virt.libvirt.volume import fs
 from nova.virt.libvirt.volume import mount
 from nova.virt.libvirt.volume import remotefs
 from nova.virt import netutils
@@ -1609,9 +1610,8 @@ def _connect_volume(self, context, connection_info, instance,
16091610 "volume connection" , instance = instance )
16101611 vol_driver .disconnect_volume (connection_info , instance )
16111612
1612- def _should_disconnect_target (self , context , connection_info , instance ):
1613- connection_count = 0
1614-
1613+ def _should_disconnect_target (self , context , instance , multiattach ,
1614+ vol_driver , volume_id ):
16151615 # NOTE(jdg): Multiattach is a special case (not to be confused
16161616 # with shared_targets). With multiattach we may have a single volume
16171617 # attached multiple times to *this* compute node (ie Server-1 and
@@ -1621,41 +1621,53 @@ def _should_disconnect_target(self, context, connection_info, instance):
         # will indiscriminantly delete the connection for all Server and that's
         # no good. So check if it's attached multiple times on this node
         # if it is we skip the call to brick to delete the connection.
-        if connection_info.get('multiattach', False):
-            volume = self._volume_api.get(
-                context,
-                driver_block_device.get_volume_id(connection_info))
-            attachments = volume.get('attachments', {})
-            if len(attachments) > 1:
-                # First we get a list of all Server UUID's associated with
-                # this Host (Compute Node). We're going to use this to
-                # determine if the Volume being detached is also in-use by
-                # another Server on this Host, ie just check to see if more
-                # than one attachment.server_id for this volume is in our
-                # list of Server UUID's for this Host
-                servers_this_host = objects.InstanceList.get_uuids_by_host(
-                    context, instance.host)
-
-                # NOTE(jdg): nova.volume.cinder translates the
-                # volume['attachments'] response into a dict which includes
-                # the Server UUID as the key, so we're using that
-                # here to check against our server_this_host list
-                for server_id, data in attachments.items():
-                    if server_id in servers_this_host:
-                        connection_count += 1
+        if not multiattach:
+            return True
+
+        # NOTE(deiter): Volume drivers using _HostMountStateManager are another
+        # special case. _HostMountStateManager ensures that the compute node
+        # only attempts to mount a single mountpoint in use by multiple
+        # attachments once, and that it is not unmounted until it is no longer
+        # in use by any attachments. So we can skip the multiattach check for
+        # volume drivers that are based on LibvirtMountedFileSystemVolumeDriver.
+        if isinstance(vol_driver, fs.LibvirtMountedFileSystemVolumeDriver):
+            return True
+
+        connection_count = 0
+        volume = self._volume_api.get(context, volume_id)
+        attachments = volume.get('attachments', {})
+        if len(attachments) > 1:
+            # First we get a list of all Server UUID's associated with
+            # this Host (Compute Node). We're going to use this to
+            # determine if the Volume being detached is also in-use by
+            # another Server on this Host, ie just check to see if more
+            # than one attachment.server_id for this volume is in our
+            # list of Server UUID's for this Host
+            servers_this_host = objects.InstanceList.get_uuids_by_host(
+                context, instance.host)
+
+            # NOTE(jdg): nova.volume.cinder translates the
+            # volume['attachments'] response into a dict which includes
+            # the Server UUID as the key, so we're using that
+            # here to check against our server_this_host list
+            for server_id, data in attachments.items():
+                if server_id in servers_this_host:
+                    connection_count += 1
         return (False if connection_count > 1 else True)
 
     def _disconnect_volume(self, context, connection_info, instance,
                            encryption=None):
         self._detach_encryptor(context, connection_info, encryption=encryption)
-        if self._should_disconnect_target(context, connection_info, instance):
-            vol_driver = self._get_volume_driver(connection_info)
+        vol_driver = self._get_volume_driver(connection_info)
+        volume_id = driver_block_device.get_volume_id(connection_info)
+        multiattach = connection_info.get('multiattach', False)
+        if self._should_disconnect_target(
+                context, instance, multiattach, vol_driver, volume_id):
             vol_driver.disconnect_volume(connection_info, instance)
         else:
-            LOG.info("Detected multiple connections on this host for volume: "
-                     "%s, skipping target disconnect.",
-                     driver_block_device.get_volume_id(connection_info),
-                     instance=instance)
+            LOG.info('Detected multiple connections on this host for '
+                     'volume: %(volume)s, skipping target disconnect.',
+                     {'volume': volume_id})
 
     def _extend_volume(self, connection_info, instance, requested_size):
         vol_driver = self._get_volume_driver(connection_info)
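
Taken together, the new checks in _should_disconnect_target decide whether the target may be torn down in this order: non-multiattach volumes always disconnect, mounted-filesystem drivers always disconnect (their _HostMountStateManager refcounts the shared mountpoint), and otherwise the target is kept whenever more than one of the volume's attachments belongs to a server on this host. The standalone sketch below illustrates that decision order; it is not Nova code, and the function name and plain-dict inputs are stand-ins for the driver, volume API and InstanceList lookups in the patch.

def should_disconnect_target(multiattach, is_mounted_fs_driver,
                             attachments, servers_this_host):
    """Sketch of the decision order used by _should_disconnect_target.

    attachments: dict keyed by server UUID, mirroring the shape that the
        NOTE(jdg) comment says nova.volume.cinder returns.
    servers_this_host: set of server UUIDs running on this compute host.
    """
    if not multiattach:
        # Only multiattach volumes can be attached more than once.
        return True
    if is_mounted_fs_driver:
        # Mounted-fs drivers refcount the mountpoint themselves, so the
        # per-host attachment count is skipped for them.
        return True
    if len(attachments) <= 1:
        return True
    # Count how many of the volume's attachments belong to servers on this
    # host; keep the target if more than one is still using it.
    connection_count = sum(
        1 for server_id in attachments if server_id in servers_this_host)
    return connection_count <= 1


# Example: the volume is attached to two servers on this host, so the
# target must not be disconnected yet.
attachments = {'server-1': {}, 'server-2': {}, 'server-3': {}}
print(should_disconnect_target(True, False, attachments,
                               {'server-1', 'server-2'}))  # False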