
Commit 690ce37

objects: Replace 'cpu_pinning_requested' helper

This helper previously avoided a slightly long-winded check for what was
essentially a binary condition. With the future introduction of a 'mixed'
policy, that will no longer always be suitable. Replace it with explicit
checks against the policy types.

Part of blueprint use-pcpu-and-vcpu-in-one-instance

Change-Id: If7512c998bca770889d81012a17a1a2978cae68e
Signed-off-by: Stephen Finucane <[email protected]>
1 parent: dff70b3
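To see why a boolean helper cannot survive a third policy, here is a minimal, self-contained sketch. Everything in it is a simplified stand-in for Nova's fields.CPUAllocationPolicy and InstanceNUMACell, and the 'mixed' value is the future policy the message above anticipates, not something that exists at this commit:

# A minimal sketch, not Nova code: simplified stand-ins for
# fields.CPUAllocationPolicy and InstanceNUMACell.
class CPUAllocationPolicy:
    SHARED = 'shared'
    DEDICATED = 'dedicated'
    MIXED = 'mixed'  # hypothetical future policy anticipated by this commit


class FakeCell:
    def __init__(self, cpu_policy=None):
        self.cpu_policy = cpu_policy

    # The removed helper reduced the policy to a yes/no answer.
    @property
    def cpu_pinning_requested(self):
        return self.cpu_policy == CPUAllocationPolicy.DEDICATED


cell = FakeCell(cpu_policy=CPUAllocationPolicy.MIXED)

# The binary helper silently lumps 'mixed' in with unpinned instances ...
assert not cell.cpu_pinning_requested

# ... while an explicit check lets callers name the policies they accept,
# so a partially pinned cell is no longer conflated with a shared one.
assert cell.cpu_policy not in (None, CPUAllocationPolicy.SHARED)

The explicit membership test is wordier, but it forces each caller to state which policies it treats as unpinned, which is exactly the decision that becomes caller-specific once a 'mixed' policy exists.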

File tree

7 files changed (+94, -42 lines)

nova/compute/manager.py

Lines changed: 5 additions & 3 deletions

@@ -828,9 +828,11 @@ def _validate_pinning_configuration(self, instances):
             # if this is an unpinned instance and the host only has
             # 'cpu_dedicated_set' configured, we need to tell the operator to
             # correct their configuration
-            if not (instance.numa_topology and
-                    instance.numa_topology.cpu_pinning_requested):
-
+            if not instance.numa_topology or (
+                instance.numa_topology.cpu_policy in (
+                    None, fields.CPUAllocationPolicy.SHARED
+                )
+            ):
                 # we don't need to check 'vcpu_pin_set' since it can't coexist
                 # alongside 'cpu_dedicated_set'
                 if (CONF.compute.cpu_dedicated_set and
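The rewritten condition in _validate_pinning_configuration treats two distinct cases as "unpinned": no NUMA topology at all, and a topology whose policy is unset or explicitly shared. A small sketch of that logic, using simplified stand-ins rather than the real Instance object:

# A small sketch, not Nova code: 'FakeTopology' stands in for the
# instance's numa_topology attribute.
SHARED = 'shared'
DEDICATED = 'dedicated'


class FakeTopology:
    def __init__(self, cpu_policy=None):
        self.cpu_policy = cpu_policy


def is_unpinned(numa_topology):
    """Mirror the rewritten check: no topology, or a non-pinned policy."""
    return not numa_topology or numa_topology.cpu_policy in (None, SHARED)


assert is_unpinned(None)                         # no NUMA topology at all
assert is_unpinned(FakeTopology(None))           # topology without a policy
assert is_unpinned(FakeTopology(SHARED))         # explicitly shared
assert not is_unpinned(FakeTopology(DEDICATED))  # pinned instances excluded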

nova/objects/instance_numa.py

Lines changed: 13 additions & 8 deletions

@@ -82,10 +82,6 @@ def siblings(self):
 
         return list(map(set, zip(*[iter(cpu_list)] * threads)))
 
-    @property
-    def cpu_pinning_requested(self):
-        return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
-
     def pin(self, vcpu, pcpu):
         if vcpu not in self.cpuset:
             return

@@ -205,17 +201,26 @@ def __len__(self):
         """Defined so that boolean testing works the same as for lists."""
         return len(self.cells)
 
+    # TODO(stephenfin): We should add a real 'cpu_policy' field on this object
+    # and deprecate the one found on the cell
+    @property
+    def cpu_policy(self):
+        cpu_policy = set(cell.cpu_policy for cell in self.cells)
+        if len(cpu_policy) > 1:
+            # NOTE(stephenfin): This should never happen in real life; it's to
+            # prevent programmer error.
+            raise exception.InternalError(
+                'Instance NUMA cells must have the same CPU policy.'
+            )
+        return cpu_policy.pop()
+
     @property
     def cpu_pinning(self):
         """Return a set of all host CPUs this NUMATopology is pinned to."""
         return set(itertools.chain.from_iterable([
             cell.cpu_pinning.values() for cell in self.cells
             if cell.cpu_pinning]))
 
-    @property
-    def cpu_pinning_requested(self):
-        return all(cell.cpu_pinning_requested for cell in self.cells)
-
     def clear_host_pinning(self):
         """Clear any data related to how instance is pinned to the host.
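The new topology-level property is an aggregate-and-validate pattern: collapse the per-cell values into a set, then insist on exactly one distinct value. A minimal standalone sketch, with simplified stand-ins for the Nova objects and exception:

# A minimal sketch of the aggregate-and-validate pattern above; these are
# simplified stand-ins, not the real Nova objects or exceptions.
class InternalError(Exception):
    pass


class FakeTopology:
    def __init__(self, cell_policies):
        self.cell_policies = cell_policies

    @property
    def cpu_policy(self):
        # Collapse per-cell policies into a set: one distinct value means
        # the cells agree; more than one is a programmer error.
        policies = set(self.cell_policies)
        if len(policies) > 1:
            raise InternalError(
                'Instance NUMA cells must have the same CPU policy.')
        return policies.pop()


# Cells that agree yield their common policy ...
assert FakeTopology(['shared', 'shared']).cpu_policy == 'shared'

# ... while disagreeing cells raise instead of returning a misleading value.
try:
    FakeTopology([None, 'shared']).cpu_policy
except InternalError:
    pass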

nova/tests/unit/objects/test_instance_numa.py

Lines changed: 41 additions & 15 deletions

@@ -10,11 +10,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import copy
-
 import mock
 from oslo_utils.fixture import uuidsentinel as uuids
 from oslo_versionedobjects import base as ovo_base
+import testtools
 
 from nova import exception
 from nova import objects

@@ -94,13 +93,6 @@ def test_pin_vcpus(self):
         inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17))
         self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
 
-    def test_cpu_pinning_requested(self):
-        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
-                                             cpu_pinning=None)
-        self.assertFalse(inst_cell.cpu_pinning_requested)
-        inst_cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(inst_cell.cpu_pinning_requested)
-
     def test_cpu_pinning(self):
         topo_obj = get_fake_obj_numa_topology(self.context)
         self.assertEqual(set(), topo_obj.cpu_pinning)

@@ -192,12 +184,46 @@ def test_get_by_instance_uuid_missing(self, mock_get):
                           objects.InstanceNUMATopology.get_by_instance_uuid,
                           self.context, 'fake_uuid')
 
-    def test_cpu_pinning_requested(self):
-        fake_topo_obj = copy.deepcopy(fake_obj_numa_topology)
-        self.assertFalse(fake_topo_obj.cpu_pinning_requested)
-        for cell in fake_topo_obj.cells:
-            cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(fake_topo_obj.cpu_pinning_requested)
+    def test_cpu_policy(self):
+        cpu_policy = fields.CPUAllocationPolicy.SHARED
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+            ],
+        )
+
+        self.assertEqual(cpu_policy, topology.cpu_policy)
+
+    def test_cpu_policy__error(self):
+        """Ensure we raise an error if cells have different values."""
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=None,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=fields.CPUAllocationPolicy.SHARED
+                ),
+            ],
+        )
+
+        with testtools.ExpectedException(exception.InternalError):
+            topology.cpu_policy
 
     def test_cpuset_reserved(self):
         topology = objects.InstanceNUMATopology(

nova/tests/unit/scheduler/filters/test_numa_topology_filters.py

Lines changed: 17 additions & 10 deletions

@@ -12,7 +12,6 @@
 
 import itertools
 
-import mock
 from oslo_utils.fixture import uuidsentinel as uuids
 
 from nova import objects

@@ -128,16 +127,24 @@ def test_numa_topology_filter_pass_set_limit(self):
         self.assertEqual(limits.cpu_allocation_ratio, 21)
         self.assertEqual(limits.ram_allocation_ratio, 1.3)
 
-    @mock.patch('nova.objects.instance_numa.InstanceNUMACell'
-                '.cpu_pinning_requested',
-                return_value=True)
     def _do_test_numa_topology_filter_cpu_policy(
-            self, numa_topology, cpu_policy, cpu_thread_policy, passes,
-            mock_pinning_requested):
-        instance_topology = objects.InstanceNUMATopology(
-            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
-                   objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
-            ])
+            self, numa_topology, cpu_policy, cpu_thread_policy, passes):
+        instance_topology = objects.InstanceNUMATopology(cells=[
+            objects.InstanceNUMACell(
+                id=0,
+                cpuset=set([1]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+            objects.InstanceNUMACell(
+                id=1,
+                cpuset=set([3]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+        ])
         spec_obj = objects.RequestSpec(numa_topology=instance_topology,
                                        pci_requests=None,
                                        instance_uuid=uuids.fake)

nova/virt/hardware.py

Lines changed: 5 additions & 3 deletions

@@ -1126,7 +1126,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
 
     # NOTE(stephenfin): As with memory, do not allow an instance to overcommit
    # against itself on any NUMA cell
-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         # TODO(stephenfin): Is 'cpuset_reserved' present if consuming emulator
         # threads from shared CPU pools? If so, we don't want to add this here
         required_cpus = len(instance_cell.cpuset) + cpuset_reserved

@@ -1151,7 +1151,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
         })
         return
 
-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         LOG.debug('Pinning has been requested')
         new_instance_cell = _numa_fit_instance_cell_with_pinning(
             host_cell, instance_cell, cpuset_reserved)

@@ -2256,7 +2256,9 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
 
         memory_usage = memory_usage + sign * instance_cell.memory
 
-        if not instance_cell.cpu_pinning_requested:
+        if instance_cell.cpu_policy != (
+            fields.CPUAllocationPolicy.DEDICATED
+        ):
             shared_cpus_usage += sign * len(instance_cell.cpuset)
             continue
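The usage-accounting hunk shows the new idiom most clearly: anything that is not explicitly DEDICATED is charged against the shared pool. A rough, self-contained sketch of that split, with a simplified Cell type standing in for InstanceNUMACell:

# A rough sketch of the accounting split above; 'Cell' is a simplified
# stand-in for InstanceNUMACell, not the real Nova object.
from dataclasses import dataclass

DEDICATED = 'dedicated'


@dataclass
class Cell:
    cpuset: set
    cpu_policy: str = None


def shared_cpu_usage(cells, sign=1):
    """Sum CPUs consumed from the shared pool, mirroring the new check."""
    usage = 0
    for cell in cells:
        # Explicit policy comparison: only DEDICATED cells are excluded,
        # so a future 'mixed' policy can be special-cased here later.
        if cell.cpu_policy != DEDICATED:
            usage += sign * len(cell.cpuset)
    return usage


cells = [Cell({0, 1}, None), Cell({2, 3}, 'shared'), Cell({4, 5}, DEDICATED)]
assert shared_cpu_usage(cells) == 4  # the dedicated cell is not counted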

nova/virt/hyperv/vmops.py

Lines changed: 7 additions & 1 deletion

@@ -464,7 +464,13 @@ def _get_instance_vnuma_config(self, instance, image_meta):
         memory_per_numa_node = instance_topology.cells[0].memory
         cpus_per_numa_node = len(instance_topology.cells[0].cpuset)
 
-        if instance_topology.cpu_pinning_requested:
+        # TODO(stephenfin): We can avoid this check entirely if we rely on the
+        # 'supports_pcpus' driver capability (via a trait), but we need to drop
+        # support for the legacy 'vcpu_pin_set' path in the libvirt driver
+        # first
+        if instance_topology.cpu_policy not in (
+            None, fields.CPUAllocationPolicy.SHARED,
+        ):
             raise exception.InstanceUnacceptable(
                 reason=_("Hyper-V does not support CPU pinning."),
                 instance_id=instance.uuid)
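Note that the Hyper-V guard is written as "not in (None, SHARED)" rather than "== DEDICATED": it names the policies the driver accepts, so any other value, including the anticipated 'mixed' one, is rejected. A minimal sketch of that behaviour, with plain strings standing in for fields.CPUAllocationPolicy:

# A minimal sketch of the inverted guard above; the constants are
# simplified stand-ins for fields.CPUAllocationPolicy values.
SHARED = 'shared'
DEDICATED = 'dedicated'
MIXED = 'mixed'  # hypothetical future policy this commit prepares for


def rejects_pinning(cpu_policy):
    """Mirror the Hyper-V guard: reject anything not purely shared."""
    return cpu_policy not in (None, SHARED)


# Today only DEDICATED trips the guard, but because the check names the
# *allowed* policies, an unforeseen value like MIXED is rejected too.
assert not rejects_pinning(None)
assert not rejects_pinning(SHARED)
assert rejects_pinning(DEDICATED)
assert rejects_pinning(MIXED)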

nova/virt/libvirt/driver.py

Lines changed: 6 additions & 2 deletions

@@ -8187,7 +8187,9 @@ def _update_provider_tree_for_pcpu(self, provider_tree, nodename,
             if not instance.numa_topology:
                 continue
 
-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue
 
             allocations_needing_reshape.append(instance.uuid)

@@ -8209,7 +8211,9 @@ def _update_provider_tree_for_pcpu(self, provider_tree, nodename,
             if not instance.numa_topology:
                 continue
 
-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue
 
             allocations_needing_reshape.append(migration.uuid)
