
Commit 1be3836

Zuul authored and openstack-gerrit committed
Merge "Reproduce asym NUMA mixed CPU policy bug" into stable/2023.1
2 parents 07033d2 + 2cf835b

File tree

1 file changed: +74 -0 lines changed


nova/tests/functional/libvirt/test_numa_servers.py

Lines changed: 74 additions & 0 deletions
@@ -346,6 +346,80 @@ def test_create_server_with_mixed_policy_fails(self):
         # There shouldn't be any hosts available to satisfy this request
         self._run_build_test(flavor_id, end_status='ERROR')
 
+    def test_create_server_with_mixed_policy_asymmetric_multi_numa(self):
+        """Boot an instance stretched to two NUMA nodes requesting only
+        shared CPUs in one NUMA and only dedicated in the other NUMA node.
+        """
+        #               shared    dedicated
+        # NUMA0 pCPU |    0     |    2 3
+        # NUMA1 pCPU |          |    6 7
+        self.flags(
+            cpu_shared_set='0',
+            cpu_dedicated_set='2,3,6,7',
+            group='compute',
+        )
+        self.flags(vcpu_pin_set=None)
+
+        host_info = fakelibvirt.HostInfo(
+            cpu_nodes=2, cpu_sockets=1, cpu_cores=4, cpu_threads=1)
+        self.start_compute(host_info=host_info, hostname='compute1')
+
+        # sanity check the created host topology object; this is really just a
+        # test of the fakelibvirt module
+        host_numa = objects.NUMATopology.obj_from_db_obj(
+            objects.ComputeNode.get_by_nodename(
+                self.ctxt, 'compute1',
+            ).numa_topology
+        )
+        self.assertEqual(2, len(host_numa.cells))
+        self.assertEqual({0}, host_numa.cells[0].cpuset)
+        self.assertEqual({2, 3}, host_numa.cells[0].pcpuset)
+
+        self.assertEqual(set(), host_numa.cells[1].cpuset)
+        self.assertEqual({6, 7}, host_numa.cells[1].pcpuset)
+
+        # create a flavor with 1 shared and 2 dedicated CPUs stretched to
+        # different NUMA nodes
+        extra_spec = {
+            'hw:cpu_policy': 'mixed',
+            'hw:cpu_dedicated_mask': '^0',
+            'hw:numa_nodes': '2',
+            'hw:numa_cpus.0': '0',
+            'hw:numa_cpus.1': '1,2',
+            'hw:numa_mem.0': '256',
+            'hw:numa_mem.1': '768',
+        }
+        flavor_id = self._create_flavor(
+            vcpu=3, memory_mb=1024, extra_spec=extra_spec)
+        # The only possible solution (ignoring the order of vCPU1,2):
+        # vCPU 0 => pCPU 0, NUMA0, shared
+        # vCPU 1 => pCPU 6, NUMA1, dedicated
+        # vCPU 2 => pCPU 7, NUMA1, dedicated
+        # This is bug 1994526 as the scheduling fails
+        self._run_build_test(flavor_id, end_status='ERROR')
+
+        # # After bug 1994526 is fixed, this should pass
+        # expected_usage = {
+        #     'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
+        # }
+        # server = self._run_build_test(
+        #     flavor_id, expected_usage=expected_usage)
+        #
+        # # sanity check the instance topology
+        # inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+        # self.assertEqual(2, len(inst.numa_topology.cells))
+        #
+        # self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
+        # self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
+        # self.assertEqual(None, inst.numa_topology.cells[0].cpu_pinning)
+        #
+        # self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
+        # self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
+        # self.assertEqual(
+        #     {6, 7},
+        #     set(inst.numa_topology.cells[1].cpu_pinning.values())
+        # )
+
     def test_create_server_with_dedicated_policy_old_configuration(self):
         """Create a server using the legacy extra spec and configuration.
0 commit comments

Comments
 (0)
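For context on the extra specs used in the added test, below is a minimal standalone sketch (not Nova code; the helper name split_vcpus is hypothetical) of how hw:cpu_dedicated_mask='^0' splits the flavor's three vCPUs into shared and dedicated sets, assuming the documented mask semantics where a leading '^' excludes the listed vCPUs from the dedicated set:

# Hypothetical helper, for illustration only -- not part of Nova.
def split_vcpus(vcpus, dedicated_mask):
    """Split vCPU ids into (shared, dedicated) sets per hw:cpu_dedicated_mask."""
    all_vcpus = set(range(vcpus))
    if dedicated_mask.startswith('^'):
        # '^0' means: every vCPU except 0 is dedicated
        excluded = {int(v) for v in dedicated_mask[1:].split(',')}
        dedicated = all_vcpus - excluded
    else:
        dedicated = {int(v) for v in dedicated_mask.split(',')}
    return all_vcpus - dedicated, dedicated


shared, dedicated = split_vcpus(3, '^0')
assert shared == {0} and dedicated == {1, 2}

# With hw:numa_cpus.0='0' and hw:numa_cpus.1='1,2', the shared vCPU lands on
# guest NUMA 0 and both dedicated vCPUs on guest NUMA 1. Given the host config
# cpu_shared_set='0' (host NUMA0) and cpu_dedicated_set='2,3,6,7', the only
# fitting placement is vCPU0 -> pCPU0 (shared) and vCPU1/2 -> pCPU6/7
# (dedicated, host NUMA1) -- the placement that bug 1994526 wrongly rejects.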