@@ -4307,14 +4307,17 @@ def do_confirm_resize(context, instance, migration):
4307
4307
instance=instance)
4308
4308
finally:
4309
4309
# Whether an error occurred or not, at this point the
4310
- # instance is on the dest host so to avoid leaking
4311
- # allocations in placement, delete them here.
4310
+ # instance is on the dest host. Avoid leaking allocations
4311
+ # in placement by deleting them here...
4312
4312
self._delete_allocation_after_move(
4313
4313
context, instance, migration)
4314
- # Also as the instance is not any more on this host, update
4315
- # the scheduler about the move
4314
+ # ...inform the scheduler about the move...
4316
4315
self._delete_scheduler_instance_info(
4317
4316
context, instance.uuid)
4317
+ # ...and unset the cached flavor information (this is done
4318
+ # last since the resource tracker relies on it for its
4319
+ # periodic tasks)
4320
+ self._delete_stashed_flavor_info(instance)
4318
4321
4319
4322
do_confirm_resize(context, instance, migration)
4320
4323
@@ -4353,13 +4356,6 @@ def _confirm_resize(self, context, instance, migration=None):
4353
4356
self.host, action=fields.NotificationAction.RESIZE_CONFIRM,
4354
4357
phase=fields.NotificationPhase.START)
4355
4358
4356
- # NOTE(danms): delete stashed migration information
4357
- old_instance_type = instance.old_flavor
4358
- instance.old_flavor = None
4359
- instance.new_flavor = None
4360
- instance.system_metadata.pop('old_vm_state', None)
4361
- instance.save()
4362
-
4363
4359
# NOTE(tr3buchet): tear down networks on source host
4364
4360
self.network_api.setup_networks_on_host(context, instance,
4365
4361
migration.source_compute, teardown=True)
@@ -4383,8 +4379,9 @@ def _confirm_resize(self, context, instance, migration=None):
4383
4379
# instance.migration_context so make sure to not call
4384
4380
# instance.drop_migration_context() until after drop_move_claim
4385
4381
# is called.
4386
- self.rt.drop_move_claim(context, instance, migration.source_node,
4387
- old_instance_type, prefix='old_')
4382
+ self.rt.drop_move_claim(
4383
+ context, instance, migration.source_node, instance.old_flavor,
4384
+ prefix='old_')
4388
4385
instance.drop_migration_context()
4389
4386
4390
4387
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
@@ -4435,6 +4432,13 @@ def _delete_allocation_after_move(self, context, instance, migration):
4435
4432
'migration_uuid': migration.uuid})
4436
4433
raise
4437
4434
4435
+ def _delete_stashed_flavor_info(self, instance):
4436
+ """Remove information about the flavor change after a resize."""
4437
+ instance.old_flavor = None
4438
+ instance.new_flavor = None
4439
+ instance.system_metadata.pop('old_vm_state', None)
4440
+ instance.save()
4441
+
4438
4442
@wrap_exception()
4439
4443
@wrap_instance_event(prefix='compute')
4440
4444
@errors_out_migration
@@ -4687,6 +4691,13 @@ def _revert_snapshot_based_resize_at_dest(
4687
4691
self.rt.drop_move_claim(ctxt, instance, instance.node,
4688
4692
instance_type=instance.new_flavor)
4689
4693
4694
+ def _revert_instance_flavor_host_node(self, instance, migration):
4695
+ """Revert host, node and flavor fields after a resize-revert."""
4696
+ self._set_instance_info(instance, instance.old_flavor)
4697
+ instance.host = migration.source_compute
4698
+ instance.node = migration.source_node
4699
+ instance.save(expected_task_state=[task_states.RESIZE_REVERTING])
4700
+
4690
4701
@wrap_exception()
4691
4702
@reverts_task_state
4692
4703
@wrap_instance_event(prefix='compute')
@@ -4715,7 +4726,11 @@ def do_revert():
4715
4726
with self._error_out_instance_on_exception(ctxt, instance):
4716
4727
self._finish_revert_snapshot_based_resize_at_source(
4717
4728
ctxt, instance, migration)
4718
- do_revert()
4729
+
4730
+ try:
4731
+ do_revert()
4732
+ finally:
4733
+ self._delete_stashed_flavor_info(instance)
4719
4734
4720
4735
# Broadcast to all schedulers that the instance is on this host.
4721
4736
# This is best effort so if anything fails just log it.
@@ -4737,16 +4752,14 @@ def _finish_revert_snapshot_based_resize_at_source(
4737
4752
task_state is "resize_reverting".
4738
4753
:param migration: Migration object whose status is "reverting".
4739
4754
"""
4740
- # Delete stashed old_vm_state information. We will use this to
4741
- # determine if the guest should be powered on when we spawn it.
4742
- old_vm_state = instance.system_metadata.pop(
4755
+ # Get stashed old_vm_state information to determine if guest should
4756
+ # be powered on after spawn; we default to ACTIVE for backwards
4757
+ # compatibility if old_vm_state is not set
4758
+ old_vm_state = instance.system_metadata.get(
4743
4759
'old_vm_state', vm_states.ACTIVE)
4744
4760
4745
- # Update instance host/node and flavor-related fields. After this
4746
- # if anything fails the instance will get rebuilt/rebooted on this
4747
- # host.
4748
- self._finish_revert_resize_update_instance_flavor_host_node(
4749
- instance, migration)
4761
+ # Revert the flavor and host/node fields to their previous values
4762
+ self._revert_instance_flavor_host_node(instance, migration)
4750
4763
4751
4764
# Move the allocations against the source compute node resource
4752
4765
# provider, held by the migration, to the instance which will drop
@@ -4936,27 +4949,6 @@ def _finish_revert_resize_network_migrate_finish(
4936
4949
LOG.error('Timeout waiting for Neutron events: %s', events,
4937
4950
instance=instance)
4938
4951
4939
- def _finish_revert_resize_update_instance_flavor_host_node(self, instance,
4940
- migration):
4941
- """Updates host/node and flavor-related fields on the instance.
4942
-
4943
- This is used when finish the revert resize operation on the source
4944
- host and updates the instance flavor-related fields back to the old
4945
- flavor and then nulls out the old/new_flavor fields.
4946
-
4947
- The instance host/node fields are also set back to the source compute
4948
- host/node.
4949
-
4950
- :param instance: Instance object
4951
- :param migration: Migration object
4952
- """
4953
- self._set_instance_info(instance, instance.old_flavor)
4954
- instance.old_flavor = None
4955
- instance.new_flavor = None
4956
- instance.host = migration.source_compute
4957
- instance.node = migration.source_node
4958
- instance.save(expected_task_state=[task_states.RESIZE_REVERTING])
4959
-
4960
4952
@wrap_exception()
4961
4953
@reverts_task_state
4962
4954
@wrap_instance_event(prefix='compute')
@@ -4970,6 +4962,16 @@ def finish_revert_resize(
4970
4962
revert the resized attributes in the database.
4971
4963
4972
4964
"""
4965
+ try:
4966
+ self._finish_revert_resize(
4967
+ context, instance, migration, request_spec)
4968
+ finally:
4969
+ self._delete_stashed_flavor_info(instance)
4970
+
4971
+ def _finish_revert_resize(
4972
+ self, context, instance, migration, request_spec=None,
4973
+ ):
4974
+ """Inner version of finish_revert_resize."""
4973
4975
with self._error_out_instance_on_exception(context, instance):
4974
4976
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4975
4977
context, instance.uuid)
@@ -4979,14 +4981,14 @@ def finish_revert_resize(
4979
4981
self.host, action=fields.NotificationAction.RESIZE_REVERT,
4980
4982
phase=fields.NotificationPhase.START, bdms=bdms)
4981
4983
4982
- # NOTE(mriedem): delete stashed old_vm_state information; we
4983
- # default to ACTIVE for backwards compatibility if old_vm_state
4984
- # is not set
4985
- old_vm_state = instance.system_metadata.pop('old_vm_state',
4986
- vm_states.ACTIVE)
4984
+ # Get stashed old_vm_state information to determine if guest should
4985
+ # be powered on after spawn; we default to ACTIVE for backwards
4986
+ # compatibility if old_vm_state is not set
4987
+ old_vm_state = instance.system_metadata.get(
4988
+ 'old_vm_state', vm_states.ACTIVE)
4987
4989
4988
- self._finish_revert_resize_update_instance_flavor_host_node(
4989
- instance, migration)
4990
+ # Revert the flavor and host/node fields to their previous values
4991
+ self._revert_instance_flavor_host_node(instance, migration)
4990
4992
4991
4993
try:
4992
4994
source_allocations = self._revert_allocation(
0 commit comments