Skip to content

Commit bbf626c

Browse files
Zuul (openstack-gerrit)
authored and committed
Merge "Clean up when queued live migration aborted" into stable/wallaby
2 parents 38a6d04 + 750b364 commit bbf626c

File tree

3 files changed

+69
-39
lines changed

3 files changed

+69
-39
lines changed

nova/compute/manager.py

Lines changed: 35 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -8564,15 +8564,41 @@ def live_migration_abort(self, context, instance, migration_id):
85648564
migration, future = (
85658565
self._waiting_live_migrations.pop(instance.uuid))
85668566
if future and future.cancel():
8567-
# If we got here, we've successfully aborted the queued
8568-
# migration and _do_live_migration won't run so we need
8569-
# to set the migration status to cancelled and send the
8570-
# notification. If Future.cancel() fails, it means
8571-
# _do_live_migration is running and the migration status
8572-
# is preparing, and _do_live_migration() itself will attempt
8573-
# to pop the queued migration, hit a KeyError, and rollback,
8574-
# set the migration to cancelled and send the
8575-
# live.migration.abort.end notification.
8567+
# If we got here, we've successfully dropped a queued
8568+
# migration from the queue, so _do_live_migration won't run
8569+
# and we only need to revert minor changes introduced by Nova
8570+
# control plane (port bindings, resource allocations and
8571+
# instance's PCI devices), restore VM's state, set the
8572+
# migration's status to cancelled and send the notification.
8573+
# If Future.cancel() fails, it means _do_live_migration is
8574+
# running and the migration status is preparing, and
8575+
# _do_live_migration() itself will attempt to pop the queued
8576+
# migration, hit a KeyError, and rollback, set the migration
8577+
# to cancelled and send the live.migration.abort.end
8578+
# notification.
8579+
self._revert_allocation(context, instance, migration)
8580+
try:
8581+
# This call will delete any inactive destination host
8582+
# port bindings.
8583+
self.network_api.setup_networks_on_host(
8584+
context, instance, host=migration.dest_compute,
8585+
teardown=True)
8586+
except exception.PortBindingDeletionFailed as e:
8587+
# Removing the inactive port bindings from the destination
8588+
# host is not critical so just log an error but don't fail.
8589+
LOG.error(
8590+
'Network cleanup failed for destination host %s '
8591+
'during live migration rollback. You may need to '
8592+
'manually clean up resources in the network service. '
8593+
'Error: %s', migration.dest_compute, str(e))
8594+
except Exception:
8595+
with excutils.save_and_reraise_exception():
8596+
LOG.exception(
8597+
'An error occurred while cleaning up networking '
8598+
'during live migration rollback.',
8599+
instance=instance)
8600+
instance.task_state = None
8601+
instance.save(expected_task_state=[task_states.MIGRATING])
85768602
self._set_migration_status(migration, 'cancelled')
85778603
except KeyError:
85788604
migration = objects.Migration.get_by_id(context, migration_id)

nova/tests/functional/libvirt/test_live_migration.py

Lines changed: 17 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -117,16 +117,12 @@ def test_queued_live_migration_abort_vm_status(self):
117117
'/servers/%s/migrations/%s' % (self.server_b['id'],
118118
serverb_migration['id']))
119119
self._wait_for_migration_status(self.server_b, ['cancelled'])
120-
# Unlock live migrations and confirm that server_a becomes
121-
# active again after successful live migration
120+
# Unlock live migrations and confirm that both servers become
121+
# active again after successful (server_a) and aborted
122+
# (server_b) live migrations
122123
self.lock_live_migration.release()
123124
self._wait_for_state_change(self.server_a, 'ACTIVE')
124-
125-
# FIXME(artom) Assert the server_b never comes out of 'MIGRATING'
126-
self.assertRaises(
127-
AssertionError,
128-
self._wait_for_state_change, self.server_b, 'ACTIVE')
129-
self._wait_for_state_change(self.server_b, 'MIGRATING')
125+
self._wait_for_state_change(self.server_b, 'ACTIVE')
130126

131127

132128
class LiveMigrationQueuedAbortTestLeftoversRemoved(LiveMigrationWithLockBase):
@@ -182,36 +178,28 @@ def test_queued_live_migration_abort_leftovers_removed(self):
182178
'/servers/%s/migrations/%s' % (self.server_b['id'],
183179
migration_server_b['id']))
184180
self._wait_for_migration_status(self.server_b, ['cancelled'])
185-
# Unlock live migrations and confirm that server_a becomes
186-
# active again after successful live migration
181+
# Unlock live migrations and confirm that both servers become
182+
# active again after successful (server_a) and aborted
183+
# (server_b) live migrations
187184
self.lock_live_migration.release()
188185
self._wait_for_state_change(self.server_a, 'ACTIVE')
189186
self._wait_for_migration_status(self.server_a, ['completed'])
190-
# FIXME(astupnikov) Assert the server_b never comes out of 'MIGRATING'
191-
# This should be fixed after bug #1949808 is addressed
192-
self._wait_for_state_change(self.server_b, 'MIGRATING')
187+
self._wait_for_state_change(self.server_b, 'ACTIVE')
193188

194-
# FIXME(astupnikov) Because of bug #1960412 allocations for aborted
195-
# queued live migration (server_b) would not be removed. Allocations
196-
# for completed live migration (server_a) should be empty.
189+
# Allocations for both successful (server_a) and aborted queued live
190+
# migration (server_b) should be removed.
197191
allocations_server_a_migration = self.placement.get(
198192
'/allocations/%s' % migration_server_a['uuid']
199193
).body['allocations']
200194
self.assertEqual({}, allocations_server_a_migration)
201195
allocations_server_b_migration = self.placement.get(
202196
'/allocations/%s' % migration_server_b['uuid']
203197
).body['allocations']
204-
src_uuid = self.api.api_get(
205-
'os-hypervisors?hypervisor_hostname_pattern=%s' %
206-
self.src_hostname).body['hypervisors'][0]['id']
207-
self.assertIn(src_uuid, allocations_server_b_migration)
208-
209-
# FIXME(astupnikov) Because of bug #1960412 INACTIVE port binding
210-
# on destination host would not be removed when queued live migration
211-
# is aborted, so 2 port bindings would exist for server_b port from
212-
# Neutron's perspective.
213-
# server_a should be migrated to dest compute, server_b should still
214-
# be hosted by src compute.
198+
self.assertEqual({}, allocations_server_b_migration)
199+
200+
# INACTIVE port binding on destination host should be removed when
201+
# queued live migration is aborted, so only 1 port binding would
202+
# exist for ports attached to both servers.
215203
port_binding_server_a = copy.deepcopy(
216204
self.neutron._port_bindings[self.neutron.port_1['id']]
217205
)
@@ -220,4 +208,5 @@ def test_queued_live_migration_abort_leftovers_removed(self):
220208
port_binding_server_b = copy.deepcopy(
221209
self.neutron._port_bindings[self.neutron.port_2['id']]
222210
)
223-
self.assertEqual(2, len(port_binding_server_b))
211+
self.assertEqual(1, len(port_binding_server_b))
212+
self.assertNotIn('dest', port_binding_server_b)

nova/tests/unit/compute/test_compute_mgr.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10352,19 +10352,34 @@ def test_live_migration_abort(self, mock_notify_action, mock_driver,
1035210352
action='live_migration_abort', phase='end')]
1035310353
)
1035410354

10355+
@mock.patch.object(objects.Instance, 'save')
10356+
@mock.patch.object(manager.ComputeManager, '_revert_allocation')
1035510357
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
1035610358
@mock.patch.object(objects.Migration, 'get_by_id')
1035710359
@mock.patch('nova.compute.utils.notify_about_instance_action')
1035810360
def test_live_migration_abort_queued(self, mock_notify_action,
10359-
mock_get_migration, mock_notify):
10361+
mock_get_migration, mock_notify,
10362+
mock_revert_allocations,
10363+
mock_instance_save):
1036010364
instance = objects.Instance(id=123, uuid=uuids.instance)
1036110365
migration = self._get_migration(10, 'queued', 'live-migration')
10366+
migration.dest_compute = uuids.dest
10367+
migration.dest_node = uuids.dest
1036210368
migration.save = mock.MagicMock()
1036310369
mock_get_migration.return_value = migration
1036410370
fake_future = mock.MagicMock()
1036510371
self.compute._waiting_live_migrations[instance.uuid] = (
1036610372
migration, fake_future)
10367-
self.compute.live_migration_abort(self.context, instance, migration.id)
10373+
with mock.patch.object(
10374+
self.compute.network_api,
10375+
'setup_networks_on_host') as mock_setup_net:
10376+
self.compute.live_migration_abort(
10377+
self.context, instance, migration.id)
10378+
mock_setup_net.assert_called_once_with(
10379+
self.context, instance, host=migration.dest_compute,
10380+
teardown=True)
10381+
mock_revert_allocations.assert_called_once_with(
10382+
self.context, instance, migration)
1036810383
mock_notify.assert_has_calls(
1036910384
[mock.call(self.context, instance,
1037010385
'live.migration.abort.start'),

0 commit comments

Comments
 (0)