Skip to content

Commit a1fa92f

Browse files
committed
Added context manager for instance lock
Moved the lock and unlock instance code into a context manager. Updated the _refresh volume attachment method to use the instance lock context manager. Now there will be a single request ID for the lock, refresh, and unlock actions. Earlier, the volume_attachment refresh operation used a unique req-id for each action. Related-Bug: #2012365 Change-Id: I6588836c3484a26d67a5995710761f0f6b6a4c18 (cherry picked from commit 211737d) (cherry picked from commit 9eebaf3)
1 parent fab81c3 commit a1fa92f

File tree

2 files changed

+175
-116
lines changed

2 files changed

+175
-116
lines changed

nova/cmd/manage.py

Lines changed: 121 additions & 105 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
"""
2323

2424
import collections
25+
from contextlib import contextmanager
2526
import functools
2627
import os
2728
import re
@@ -144,6 +145,37 @@ def format_dict(dct, dict_property="Property", dict_value='Value',
144145
return encodeutils.safe_encode(pt.get_string()).decode()
145146

146147

148+
@contextmanager
def locked_instance(cell_mapping, instance, reason):
    """Context manager that holds an instance lock for the wrapped block.

    The instance's original lock state is restored on exit regardless of
    whether the wrapped code succeeded or failed: an instance that was
    already locked on entry is left locked, while one locked here is
    unlocked again afterwards.

    :param cell_mapping: cell mapping of the cell hosting the instance
    :param instance: instance to lock while the block runs
    :param reason: human readable reason recorded with the lock
    """
    compute_api = api.API()

    # Remember whether the caller already held a lock before we touch it.
    was_locked = instance.locked

    if not was_locked:
        with context.target_cell(
            context.get_admin_context(), cell_mapping,
        ) as cctxt:
            compute_api.lock(cctxt, instance, reason=reason)
    try:
        yield
    finally:
        # Only undo a lock that this context manager itself created.
        if not was_locked:
            with context.target_cell(
                context.get_admin_context(), cell_mapping,
            ) as cctxt:
                compute_api.unlock(cctxt, instance)
178+
147179
class DbCommands(object):
148180
"""Class for managing the main database."""
149181

@@ -2998,10 +3030,8 @@ def _refresh(self, instance_uuid, volume_id, connector):
29983030
:param instance_uuid: UUID of instance
29993031
:param volume_id: ID of volume attached to the instance
30003032
:param connector: Connector with which to create the new attachment
3033+
:return status_code: volume-refresh status_code 0 on success
30013034
"""
3002-
volume_api = cinder.API()
3003-
compute_rpcapi = rpcapi.ComputeAPI()
3004-
compute_api = api.API()
30053035

30063036
ctxt = context.get_admin_context()
30073037
im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_uuid)
@@ -3017,111 +3047,97 @@ def _refresh(self, instance_uuid, volume_id, connector):
30173047
state=instance.vm_state,
30183048
method='refresh connection_info (must be stopped)')
30193049

3020-
if instance.locked:
3021-
raise exception.InstanceInvalidState(
3022-
instance_uuid=instance_uuid, attr='locked', state='True',
3023-
method='refresh connection_info (must be unlocked)')
3024-
3025-
compute_api.lock(
3026-
cctxt, instance,
3027-
reason=(
3028-
f'Refreshing connection_info for BDM {bdm.uuid} '
3029-
f'associated with instance {instance_uuid} and volume '
3030-
f'{volume_id}.'))
3031-
3032-
# NOTE(lyarwood): Yes this is weird but we need to recreate the admin
3033-
# context here to ensure the lock above uses a unique request-id
3034-
# versus the following refresh and eventual unlock.
3035-
ctxt = context.get_admin_context()
3036-
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
3037-
instance_action = None
3038-
new_attachment_id = None
3050+
locking_reason = (
3051+
f'Refreshing connection_info for BDM {bdm.uuid} '
3052+
f'associated with instance {instance_uuid} and volume '
3053+
f'{volume_id}.')
3054+
3055+
with locked_instance(im.cell_mapping, instance, locking_reason):
3056+
return self._do_refresh(
3057+
cctxt, instance, volume_id, bdm, connector)
3058+
3059+
def _do_refresh(self, cctxt, instance,
                volume_id, bdm, connector):
    """Refresh the volume attachment and connection_info for a BDM.

    Creates a fresh cinder attachment for the volume, tears down the old
    connection on the compute host, and stashes the regenerated
    connection_info in the block device mapping.

    :param cctxt: cell-targeted admin context
    :param instance: instance the volume is attached to
    :param volume_id: ID of the volume attached to the instance
    :param bdm: BlockDeviceMapping for the attachment being refreshed
    :param connector: connector with which to create the new attachment
    :return: 0 on success
    """
    volume_api = cinder.API()
    compute_rpcapi = rpcapi.ComputeAPI()

    # NOTE: both must be initialised before entering the try block; the
    # finally clause references them, and without this a failure in
    # action_start would raise UnboundLocalError on instance_action and
    # mask the original exception.
    instance_action = None
    new_attachment_id = None
    try:
        # Log this as an instance action so operators and users are
        # aware that this has happened.
        instance_action = objects.InstanceAction.action_start(
            cctxt, instance.uuid,
            instance_actions.NOVA_MANAGE_REFRESH_VOLUME_ATTACHMENT)

        # Create a blank attachment to keep the volume reserved
        new_attachment_id = volume_api.attachment_create(
            cctxt, volume_id, instance.uuid)['id']

        # RPC call to the compute to cleanup the connections, which
        # will in turn unmap the volume from the compute host
        # TODO(lyarwood): Add delete_attachment as a kwarg to
        # remove_volume_connection as is available in the private
        # method within the manager.
        compute_rpcapi.remove_volume_connection(
            cctxt, instance, volume_id, instance.host)

        # Delete the existing volume attachment if present in the bdm.
        # This isn't present when the original attachment was made
        # using the legacy cinderv2 APIs before the cinderv3 attachment
        # based APIs were present.
        if bdm.attachment_id:
            volume_api.attachment_delete(cctxt, bdm.attachment_id)

        # Update the attachment with host connector, this regenerates
        # the connection_info that we can now stash in the bdm.
        new_connection_info = volume_api.attachment_update(
            cctxt, new_attachment_id, connector,
            bdm.device_name)['connection_info']

        # Before we save it to the BDM ensure the serial is stashed as
        # is done in various other codepaths when attaching volumes.
        if 'serial' not in new_connection_info:
            new_connection_info['serial'] = bdm.volume_id

        # Save the new attachment id and connection_info to the DB
        bdm.attachment_id = new_attachment_id
        bdm.connection_info = jsonutils.dumps(new_connection_info)
        bdm.save()

        # Finally mark the attachment as complete, moving the volume
        # status from attaching to in-use ahead of the instance
        # restarting
        volume_api.attachment_complete(cctxt, new_attachment_id)
        return 0

    finally:
        # If the bdm.attachment_id wasn't updated make sure we clean
        # up any attachments created during the run.
        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
            cctxt, volume_id, instance.uuid)
        if (
            new_attachment_id and
            bdm.attachment_id != new_attachment_id
        ):
            volume_api.attachment_delete(cctxt, new_attachment_id)

        # If we failed during attachment_update the bdm.attachment_id
        # has already been deleted so recreate it now to ensure the
        # volume is still associated with the instance and clear the
        # now stale connection_info.
        try:
            volume_api.attachment_get(cctxt, bdm.attachment_id)
        except exception.VolumeAttachmentNotFound:
            bdm.attachment_id = volume_api.attachment_create(
                cctxt, volume_id, instance.uuid)['id']
            bdm.connection_info = None
            bdm.save()

        # Finish the instance action if it was created and started
        # TODO(lyarwood): While not really required we should store
        # the exec and traceback in here on failure.
        if instance_action:
            instance_action.finish()

31263142
@action_description(
31273143
_("Refresh the connection info for a given volume attachment"))

nova/tests/unit/cmd/test_manage.py

Lines changed: 54 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3472,6 +3472,7 @@ def test_refresh_invalid_connector_path_file(self, mock_exists):
34723472
def _test_refresh(self, mock_exists):
34733473
ctxt = context.get_admin_context()
34743474
cell_ctxt = context.get_admin_context()
3475+
cell_ctxt.cell_uuid = '39fd7a1f-db62-45bc-a7b6-8137cef87c9d'
34753476
fake_connector = self._get_fake_connector_info()
34763477

34773478
mock_exists.return_value = True
@@ -3528,11 +3529,14 @@ def test_refresh_invalid_instance_state(
35283529
output = self.output.getvalue().strip()
35293530
self.assertIn('must be stopped', output)
35303531

3532+
@mock.patch.object(objects.InstanceAction, 'action_start')
3533+
@mock.patch.object(manage.VolumeAttachmentCommands, '_do_refresh')
35313534
@mock.patch.object(
35323535
objects.BlockDeviceMapping, 'get_by_volume_and_instance')
35333536
@mock.patch.object(objects.Instance, 'get_by_uuid')
3534-
def test_refresh_instance_already_locked_failure(
3535-
self, mock_get_instance, mock_get_bdm
3537+
def test_refresh_instance_already_locked(
3538+
self, mock_get_instance, mock_get_bdm,
3539+
mock__do_refresh, mock_action_start
35363540
):
35373541
"""Test refresh with instance when instance is already locked."""
35383542
mock_get_instance.return_value = objects.Instance(
@@ -3542,11 +3546,11 @@ def test_refresh_instance_already_locked_failure(
35423546
mock_get_bdm.return_value = objects.BlockDeviceMapping(
35433547
uuid=uuidsentinel.bdm, volume_id=uuidsentinel.volume,
35443548
attachment_id=uuidsentinel.instance)
3549+
mock_action = mock.Mock(spec=objects.InstanceAction)
3550+
mock_action_start.return_value = mock_action
35453551

3546-
ret = self._test_refresh()
3547-
self.assertEqual(5, ret)
3548-
output = self.output.getvalue().strip()
3549-
self.assertIn('must be unlocked', output)
3552+
self._test_refresh()
3553+
mock__do_refresh.assert_called_once()
35503554

35513555
@mock.patch.object(
35523556
objects.BlockDeviceMapping, 'get_by_volume_and_instance')
@@ -3573,9 +3577,9 @@ def test_refresh_invalid_volume_id(self, mock_get_instance, mock_get_bdm):
35733577
@mock.patch.object(objects.Instance, 'get_by_uuid')
35743578
@mock.patch.object(objects.InstanceAction, 'action_start')
35753579
def test_refresh_attachment_unknown_failure(
3576-
self, mock_action_start, mock_get_instance, mock_get_bdm, mock_lock,
3577-
mock_unlock, mock_attachment_create, mock_attachment_delete,
3578-
mock_attachment_get
3580+
self, mock_action_start, mock_get_instance,
3581+
mock_get_bdm, mock_lock, mock_unlock, mock_attachment_create,
3582+
mock_attachment_delete, mock_attachment_get
35793583
):
35803584
"""Test refresh with instance when any other error happens.
35813585
"""
@@ -3615,8 +3619,9 @@ def test_refresh_attachment_unknown_failure(
36153619
@mock.patch.object(objects.Instance, 'get_by_uuid')
36163620
@mock.patch.object(objects.InstanceAction, 'action_start')
36173621
def test_refresh(
3618-
self, mock_action_start, mock_get_instance, mock_get_bdm,
3619-
mock_save_bdm, mock_compute_api, mock_volume_api, mock_compute_rpcapi
3622+
self, mock_action_start, mock_get_instance,
3623+
mock_get_bdm, mock_save_bdm, mock_compute_api, mock_volume_api,
3624+
mock_compute_rpcapi
36203625
):
36213626
"""Test refresh with a successful code path."""
36223627
fake_compute_api = mock_compute_api.return_value
@@ -3695,6 +3700,44 @@ def test_error_post_mortem(self, mock_conf, mock_parse_args, mock_pm):
36953700
self.assertEqual(255, manage.main())
36963701
self.assertTrue(mock_pm.called)
36973702

3703+
def _lock_instance(self, ctxt, instance, reason):
    # Stand-in side_effect for the mocked compute API lock():
    # just flips the flag instead of hitting the real API.
    instance.locked = True
3705+
3706+
def _unlock_instance(self, ctxt, instance):
    # Stand-in side_effect for the mocked compute API unlock():
    # just clears the flag instead of hitting the real API.
    instance.locked = False
3708+
3709+
def test_locked_instance(self):
    """A pre-locked instance must remain locked after the context exits."""
    cell_mapping = objects.CellMapping(name='foo', uuid=uuidsentinel.cell)
    fake_instance = objects.Instance(
        project_id=uuidutils.generate_uuid(), uuid=uuidsentinel.instance)
    fake_instance.locked = True

    with mock.patch('nova.compute.api.API') as mock_api:
        fake_api = mock_api.return_value
        fake_api.lock.side_effect = self._lock_instance
        fake_api.unlock.side_effect = self._unlock_instance

        with manage.locked_instance(cell_mapping, fake_instance, 'some'):
            # The lock is held inside the managed block.
            self.assertTrue(fake_instance.locked)

        # The pre-existing lock must not be released on exit.
        self.assertTrue(fake_instance.locked)
3724+
3725+
def test_unlocked_instance(self):
    """An unlocked instance must be unlocked again after the context."""
    cell_mapping = objects.CellMapping(name='foo', uuid=uuidsentinel.cell)
    fake_instance = objects.Instance(
        project_id=uuidutils.generate_uuid(), uuid=uuidsentinel.instance)
    fake_instance.locked = False

    with mock.patch('nova.compute.api.API') as mock_api:
        fake_api = mock_api.return_value
        fake_api.lock.side_effect = self._lock_instance
        fake_api.unlock.side_effect = self._unlock_instance

        with manage.locked_instance(cell_mapping, fake_instance, 'some'):
            # The context manager acquires the lock for the block.
            self.assertTrue(fake_instance.locked)

        # The lock taken by the context manager is released on exit.
        self.assertFalse(fake_instance.locked)
3740+
36983741

36993742
class LibvirtCommandsTestCase(test.NoDBTestCase):
37003743

0 commit comments

Comments
 (0)