@@ -265,6 +265,27 @@ def inner(self, context, instance, *args, **kw):
     return outer


+def reject_vdpa_instances(operation):
+    """Reject requests to decorated function if instance has vDPA interfaces.
+
+    Raise OperationNotSupportedForVDPAInterface if the operation involves
+    one or more vDPA interfaces.
+    """
+
+    def outer(f):
+        @functools.wraps(f)
+        def inner(self, context, instance, *args, **kw):
+            if any(
+                vif['vnic_type'] == network_model.VNIC_TYPE_VDPA
+                for vif in instance.get_network_info()
+            ):
+                raise exception.OperationNotSupportedForVDPAInterface(
+                    instance_uuid=instance.uuid, operation=operation)
+            return f(self, context, instance, *args, **kw)
+        return inner
+    return outer
+
+
 def load_cells():
     global CELLS
     if not CELLS:
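Editor's note: a minimal standalone sketch (not part of the change) of how
the new guard behaves at call time. FakeInstance and FakeAPI are illustrative
stand-ins; the decorator and exception are the ones added above. A single
vDPA VIF in the instance's network info cache is enough to reject the call
before the wrapped method body runs:

    class FakeInstance:
        uuid = '11111111-2222-3333-4444-555555555555'  # illustrative

        def get_network_info(self):
            # dict-like VIF entries, as in the instance info cache
            return [{'vnic_type': 'vdpa'}]  # network_model.VNIC_TYPE_VDPA

    class FakeAPI:
        @reject_vdpa_instances('suspend')  # instance_actions.SUSPEND in nova
        def suspend(self, context, instance):
            return 'suspended'

    # Raises OperationNotSupportedForVDPAInterface; suspend() never executes.
    FakeAPI().suspend(None, FakeInstance())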
@@ -3948,6 +3969,9 @@ def _validate_host_for_cold_migrate(

     # TODO(stephenfin): This logic would be so much easier to grok if we
     # finally split resize and cold migration into separate code paths
+    # FIXME(sean-k-mooney): Cold migration and resize to a different host
+    # probably work, but they have not been tested, so block them for now
+    @reject_vdpa_instances(instance_actions.RESIZE)
     @block_accelerators()
     @check_instance_lock
     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
@@ -3962,6 +3986,7 @@ def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
         host_name is always None in the resize case.
         host_name can be set in the cold migration case only.
         """
+
         allow_cross_cell_resize = self._allow_cross_cell_resize(
             context, instance)

@@ -4165,6 +4190,9 @@ def _allow_resize_to_same_host(self, cold_migrate, instance):
             allow_same_host = CONF.allow_resize_to_same_host
         return allow_same_host

+    # FIXME(sean-k-mooney): Shelve works but unshelve does not due to bug
+    # #1851545, so block it for now
+    @reject_vdpa_instances(instance_actions.SHELVE)
     @reject_vtpm_instances(instance_actions.SHELVE)
     @block_accelerators(until_service=54)
     @check_instance_lock
@@ -4184,7 +4212,6 @@ def shelve(self, context, instance, clean_shutdown=True):
         instance.system_metadata.update(
             {'image_base_image_ref': instance.image_ref}
         )
-
         instance.save(expected_task_state=[None])

         self._record_action_start(context, instance, instance_actions.SHELVE)
@@ -4352,6 +4379,10 @@ def get_instance_diagnostics(self, context, instance):
         return self.compute_rpcapi.get_instance_diagnostics(context,
                                                             instance=instance)

+    # FIXME(sean-k-mooney): Suspend does not work because we do not unplug
+    # the vDPA devices before calling managed save, as we do with SR-IOV
+    # devices
+    @reject_vdpa_instances(instance_actions.SUSPEND)
     @block_accelerators()
     @reject_sev_instances(instance_actions.SUSPEND)
     @check_instance_lock
@@ -5015,19 +5046,27 @@ def attach_interface(self, context, instance, network_id, port_id,
         self._record_action_start(
             context, instance, instance_actions.ATTACH_INTERFACE)

-        # NOTE(gibi): Checking if the requested port has resource request as
-        # such ports are only supported if the compute service version is >= 55
-        # TODO(gibi): Remove this check in X as there we can be sure that all
-        # computes are new enough
         if port_id:
-            port = self.network_api.show_port(context, port_id)
-            if port['port'].get(constants.RESOURCE_REQUEST):
+            port = self.network_api.show_port(context, port_id)['port']
+            # NOTE(gibi): Checking if the requested port has resource request
+            # as such ports are only supported if the compute service version
+            # is >= 55.
+            # TODO(gibi): Remove this check in X as there we can be sure
+            # that all computes are new enough.
+            if port.get(constants.RESOURCE_REQUEST):
                 svc = objects.Service.get_by_host_and_binary(
                     context, instance.host, 'nova-compute')
                 if svc.version < 55:
                     raise exception.AttachInterfaceWithQoSPolicyNotSupported(
                         instance_uuid=instance.uuid)

+            if port.get('binding:vnic_type', 'normal') == 'vdpa':
+                # FIXME(sean-k-mooney): Attach works but detach results in a
+                # QEMU error; blocked until this is resolved
+                raise exception.OperationNotSupportedForVDPAInterface(
+                    instance_uuid=instance.uuid,
+                    operation=instance_actions.ATTACH_INTERFACE)
+
         return self.compute_rpcapi.attach_interface(context,
             instance=instance, network_id=network_id, port_id=port_id,
             requested_ip=requested_ip, tag=tag)
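Editor's note: an illustrative sketch (sample data, not nova code) of the
port body these checks operate on. show_port() returns {'port': {...}}, so
indexing with ['port'] once lets both checks share the same dict; the
'resource_request' key is assumed to be what constants.RESOURCE_REQUEST
names:

    port = {
        'id': 'illustrative-port-uuid',
        'binding:vnic_type': 'vdpa',  # any other value passes the new guard
        # no 'resource_request' key -> the service-version check is skipped
    }
    assert port.get('binding:vnic_type', 'normal') == 'vdpa'  # rejected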
@@ -5038,6 +5077,29 @@ def attach_interface(self, context, instance, network_id, port_id,
                           task_state=[None])
     def detach_interface(self, context, instance, port_id):
         """Detach a network adapter from an instance."""
+
+        # FIXME(sean-k-mooney): Detach currently results in a failure to
+        # remove the interface from the live libvirt domain, so while the
+        # networking is torn down on the host, the vDPA device is still
+        # attached to the VM. This is likely a libvirt/qemu bug, so block
+        # detach until that is resolved.
+        for vif in instance.get_network_info():
+            if vif['id'] == port_id:
+                if vif['vnic_type'] == 'vdpa':
+                    raise exception.OperationNotSupportedForVDPAInterface(
+                        instance_uuid=instance.uuid,
+                        operation=instance_actions.DETACH_INTERFACE)
+                break
+        else:
+            # NOTE(sean-k-mooney): This should never happen, but just in case
+            # the info cache does not have the port we are detaching, we can
+            # fall back to neutron.
+            port = self.network_api.show_port(context, port_id)['port']
+            if port.get('binding:vnic_type', 'normal') == 'vdpa':
+                raise exception.OperationNotSupportedForVDPAInterface(
+                    instance_uuid=instance.uuid,
+                    operation=instance_actions.DETACH_INTERFACE)
+
         self._record_action_start(
             context, instance, instance_actions.DETACH_INTERFACE)
         self.compute_rpcapi.detach_interface(context, instance=instance,
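Editor's note: the guard above relies on Python's for/else, which is easy to
misread: the else branch runs only when the loop finishes without break,
i.e. only when the port is missing from the info cache. A standalone sketch
with illustrative data:

    cached_vifs = [{'id': 'port-a', 'vnic_type': 'normal'}]

    for vif in cached_vifs:
        if vif['id'] == 'port-b':  # the port being detached; not cached
            break                  # found: the else branch is skipped
    else:
        print('cache miss: fall back to a neutron show_port() lookup')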
@@ -5079,6 +5141,7 @@ def update_instance_metadata(self, context, instance,
         return _metadata

+    @reject_vdpa_instances(instance_actions.LIVE_MIGRATION)
     @block_accelerators()
     @reject_vtpm_instances(instance_actions.LIVE_MIGRATION)
     @reject_sev_instances(instance_actions.LIVE_MIGRATION)
@@ -5210,6 +5273,8 @@ def live_migrate_abort(self, context, instance, migration_id,
         self.compute_rpcapi.live_migration_abort(context,
                 instance, migration.id)

+    # FIXME(sean-k-mooney): Rebuild works, but we have not tested evacuate yet
+    @reject_vdpa_instances(instance_actions.EVACUATE)
     @reject_vtpm_instances(instance_actions.EVACUATE)
     @block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD)
     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,