Skip to content

Commit 26f24b7

Browse files
Zuul (openstack-gerrit)
authored and committed
Merge "Support evacuate with PCI in placement"
2 parents 5e5b675 + 5364276 commit 26f24b7

File tree

2 files changed

+190
-9
lines changed

2 files changed

+190
-9
lines changed

nova/conductor/manager.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1242,6 +1242,12 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
12421242
request_spec.requested_resources = res_req
12431243
request_spec.request_level_params = req_lvl_params
12441244

1245+
# NOTE(gibi): as PCI devices is tracked in placement we
1246+
# need to generate request groups from InstancePCIRequests.
1247+
# This will append new RequestGroup objects to the
1248+
# request_spec.requested_resources list if needed
1249+
request_spec.generate_request_groups_from_pci_requests()
1250+
12451251
try:
12461252
# if this is a rebuild of instance on the same host with
12471253
# new image.

nova/tests/functional/libvirt/test_pci_sriov_servers.py

Lines changed: 184 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -153,15 +153,7 @@ def _get_rp_by_name(self, name, rps):
153153
return rp
154154
self.fail(f'RP {name} is not found in Placement {rps}')
155155

156-
def assert_placement_pci_view(
157-
self, hostname, inventories, traits, usages=None, allocations=None
158-
):
159-
if not usages:
160-
usages = {}
161-
162-
if not allocations:
163-
allocations = {}
164-
156+
def assert_placement_pci_inventory(self, hostname, inventories, traits):
165157
compute_rp_uuid = self.compute_rp_uuids[hostname]
166158
rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
167159

@@ -201,6 +193,10 @@ def assert_placement_pci_view(
201193
f"Traits on RP {real_rp_name} does not match with expectation"
202194
)
203195

196+
def assert_placement_pci_usages(self, hostname, usages):
197+
compute_rp_uuid = self.compute_rp_uuids[hostname]
198+
rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
199+
204200
for rp_name, usage in usages.items():
205201
real_rp_name = f'{hostname}_{rp_name}'
206202
rp = self._get_rp_by_name(real_rp_name, rps)
@@ -211,6 +207,38 @@ def assert_placement_pci_view(
211207
f"Usage on RP {real_rp_name} does not match with expectation"
212208
)
213209

210+
def assert_placement_pci_allocations(self, allocations):
    """Assert the exact placement allocations of a set of consumers.

    :param allocations: dict keyed by consumer (server) uuid; each value
        maps a resource provider name to the resource-class -> amount
        dict the consumer is expected to allocate from that provider.
    """
    for consumer, expected_allocations in allocations.items():
        actual_allocations = self._get_allocations_by_server_uuid(consumer)
        # The consumer must allocate from exactly as many providers as
        # expected, no more and no fewer.
        count_msg = (
            f"The consumer {consumer} allocates from different number of "
            f"RPs than expected. Expected: {expected_allocations}, "
            f"Actual: {actual_allocations}"
        )
        self.assertEqual(
            len(expected_allocations), len(actual_allocations), count_msg)
        for rp_name, expected_rp_allocs in expected_allocations.items():
            # Expectations are keyed by provider name; placement keys the
            # real allocations by provider uuid, so translate first.
            rp_uuid = self._get_provider_uuid_by_name(rp_name)
            self.assertIn(
                rp_uuid,
                actual_allocations,
                f"The consumer {consumer} expected to allocate from "
                f"{rp_name}. Expected: {expected_allocations}, "
                f"Actual: {actual_allocations}",
            )
            actual_rp_allocs = actual_allocations[rp_uuid]['resources']
            self.assertEqual(
                expected_rp_allocs,
                actual_rp_allocs,
                f"The consumer {consumer} expected to have allocation "
                f"{expected_rp_allocs} on {rp_name} but it has "
                f"{actual_rp_allocs} instead.")
237+
238+
def assert_placement_pci_allocations_on_host(self, hostname, allocations):
239+
compute_rp_uuid = self.compute_rp_uuids[hostname]
240+
rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
241+
214242
for consumer, expected_allocations in allocations.items():
215243
actual_allocations = self._get_allocations_by_server_uuid(consumer)
216244
self.assertEqual(
@@ -242,6 +270,19 @@ def assert_placement_pci_view(
242270
f"{actual_rp_allocs} instead."
243271
)
244272

273+
def assert_placement_pci_view(
    self, hostname, inventories, traits, usages=None, allocations=None
):
    """Assert the full PCI view of a compute in placement.

    Convenience wrapper that checks inventories, traits, usages and
    allocations in one call. ``usages`` / ``allocations`` default to
    empty expectations when omitted or falsy.
    """
    # Falsy values (None or {}) both mean "expect nothing".
    usages = usages or {}
    allocations = allocations or {}

    self.assert_placement_pci_inventory(hostname, inventories, traits)
    self.assert_placement_pci_usages(hostname, usages)
    self.assert_placement_pci_allocations_on_host(hostname, allocations)
285+
245286
@staticmethod
246287
def _to_device_spec_conf(spec_list):
247288
return [jsonutils.dumps(x) for x in spec_list]
@@ -2520,6 +2561,140 @@ def test_request_two_pci_but_host_has_one(self):
25202561
self.assertIn('fault', server)
25212562
self.assertIn('No valid host', server['fault']['message'])
25222563

2564+
def _create_two_computes_and_an_instance_on_the_first(self):
    """Start two computes with one PCI device each and boot a server
    with a single PCI passthrough device on test_compute0.

    :returns: a 3-tuple of the created server and the expected placement
        PCI views (dicts with "inventories", "traits", "usages" and
        "allocations" keys) of test_compute0 and test_compute1.
    """
    def initial_pci_view():
        # A fresh dict per call so that the two hosts' expectations can
        # later be mutated independently by the caller.
        return {
            "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
            "traits": {"0000:81:00.0": []},
            "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
            "allocations": {},
        }

    self.start_compute(
        hostname='test_compute0',
        pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
    self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
    compute0_expected_placement_view = initial_pci_view()
    self.assert_placement_pci_view(
        "test_compute0", **compute0_expected_placement_view)

    self.start_compute(
        hostname='test_compute1',
        pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1),
    )
    self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
    compute1_expected_placement_view = initial_pci_view()
    self.assert_placement_pci_view(
        "test_compute1", **compute1_expected_placement_view)

    # boot a VM on test_compute0 with a single PCI dev
    flavor_id = self._create_flavor(
        extra_spec={'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'})
    server = self._create_server(
        flavor_id=flavor_id, networks='none', host="test_compute0")

    # the boot consumed the single device on test_compute0
    self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
    compute0_expected_placement_view["usages"][
        "0000:81:00.0"][self.PCI_RC] = 1
    compute0_expected_placement_view["allocations"][
        server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
    self.assert_placement_pci_view(
        "test_compute0", **compute0_expected_placement_view)

    return (
        server,
        compute0_expected_placement_view,
        compute1_expected_placement_view,
    )
2611+
2612+
def test_evacuate(self):
    """Evacuate a server with a PCI device from a dead compute and
    verify the PCI inventories, usages and allocations in placement
    during and after the evacuation, including the cleanup when the
    source compute is recovered.
    """
    (
        server,
        test_compute0_placement_pci_view,
        test_compute1_placement_pci_view,
    ) = self._create_two_computes_and_an_instance_on_the_first()

    # kill test_compute0 and evacuate the instance
    self.computes['test_compute0'].stop()
    self.api.put_service(
        self.computes["test_compute0"].service_ref.uuid,
        {"forced_down": True},
    )
    self._evacuate_server(server)
    # source allocation should be kept as source is dead but the server
    # now has allocation on both hosts as evacuation does not use migration
    # allocations.
    self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
    self.assert_placement_pci_inventory(
        "test_compute0",
        test_compute0_placement_pci_view["inventories"],
        test_compute0_placement_pci_view["traits"]
    )
    self.assert_placement_pci_usages(
        "test_compute0", test_compute0_placement_pci_view["usages"]
    )
    # NOTE(review): VCPU/MEMORY_MB/DISK_GB amounts come from the default
    # flavor used by _create_flavor — confirm if that flavor changes.
    self.assert_placement_pci_allocations(
        {
            server['id']: {
                "test_compute0": {
                    "VCPU": 2,
                    "MEMORY_MB": 2048,
                    "DISK_GB": 20,
                },
                "test_compute0_0000:81:00.0": {self.PCI_RC: 1},
                "test_compute1": {
                    "VCPU": 2,
                    "MEMORY_MB": 2048,
                    "DISK_GB": 20,
                },
                "test_compute1_0000:81:00.0": {self.PCI_RC: 1},
            },
        }
    )

    # dest allocation should be created
    self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
    test_compute1_placement_pci_view["usages"][
        "0000:81:00.0"][self.PCI_RC] = 1
    test_compute1_placement_pci_view["allocations"][
        server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
    self.assert_placement_pci_inventory(
        "test_compute1",
        test_compute1_placement_pci_view["inventories"],
        test_compute1_placement_pci_view["traits"]
    )
    # FIX: assert against test_compute1's own expected usages; the
    # original checked test_compute0's dict here, which only passed
    # because both dicts happened to hold identical values.
    self.assert_placement_pci_usages(
        "test_compute1", test_compute1_placement_pci_view["usages"]
    )

    # recover test_compute0 and check that it is cleaned
    self.restart_compute_service('test_compute0')

    self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
    test_compute0_placement_pci_view = {
        "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
        "traits": {"0000:81:00.0": []},
        "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
        "allocations": {},
    }
    self.assert_placement_pci_view(
        "test_compute0", **test_compute0_placement_pci_view)

    # and test_compute1 is not changed (except that the instance now has
    # allocations only on this compute)
    self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
    test_compute1_placement_pci_view["usages"][
        "0000:81:00.0"][self.PCI_RC] = 1
    test_compute1_placement_pci_view["allocations"][
        server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
    self.assert_placement_pci_view(
        "test_compute1", **test_compute1_placement_pci_view)

    self.assert_no_pci_healing("test_compute0")
    self.assert_no_pci_healing("test_compute1")
2697+
25232698

25242699
class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
25252700

0 commit comments

Comments
 (0)