Commit 5516304

Merge pull request #78 from stackhpc/upstream/2023.1-2024-06-25
Synchronise 2023.1 with upstream
2 parents: 691091a + 3226318

28 files changed: +627 -273 lines

doc/source/admin/cpu-topologies.rst

Lines changed: 5 additions & 0 deletions
@@ -772,6 +772,11 @@ while there could be other tools outside Nova to manage the governor, like
 tuned. That being said, we also provide a way to automatically change the
 governors on the fly, as explained below.
 
+.. important::
+   Some OS platforms don't support `cpufreq` resources in sysfs, so the
+   ``governor`` strategy could be not available. Please verify if your OS
+   supports scaling govenors before modifying the configuration option.
+
 If the strategy is set to ``governor``, a couple of config options are provided
 to define which exact CPU govenor to use for each of the up and down states :
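
The added note asks operators to confirm that the platform actually exposes cpufreq scaling governors before switching the power management strategy. A minimal check, assuming the standard Linux sysfs layout (this helper is illustrative only, not part of the patch), could look like:

import os

def has_cpufreq_support(cpu=0):
    # The ``governor`` strategy relies on this sysfs file being present.
    path = f'/sys/devices/system/cpu/cpu{cpu}/cpufreq/scaling_governor'
    return os.path.exists(path)

print('cpufreq governors available:', has_cpufreq_support())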

nova/cmd/manage.py

Lines changed: 4 additions & 4 deletions
@@ -3195,7 +3195,7 @@ class ImagePropertyCommands:
         'instance_uuid', metavar='<instance_uuid>',
         help='UUID of the instance')
     @args(
-        'property', metavar='<image_property>',
+        'image_property', metavar='<image_property>',
         help='Image property to show')
     def show(self, instance_uuid=None, image_property=None):
         """Show value of a given instance image property.
@@ -3213,10 +3213,10 @@ def show(self, instance_uuid=None, image_property=None):
             with context.target_cell(ctxt, im.cell_mapping) as cctxt:
                 instance = objects.Instance.get_by_uuid(
                     cctxt, instance_uuid, expected_attrs=['system_metadata'])
-                image_property = instance.system_metadata.get(
+                property_value = instance.system_metadata.get(
                     f'image_{image_property}')
-                if image_property:
-                    print(image_property)
+                if property_value:
+                    print(property_value)
                     return 0
                 else:
                     print(f'Image property {image_property} not found '
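
The rename matters because the old code rebound the ``image_property`` argument to the looked-up value, so when the property was missing the error message printed ``None`` instead of the requested property name. A toy reproduction of that shadowing bug (plain Python, not Nova code):

def show_buggy(system_metadata, image_property):
    # Rebinding the parameter loses the name needed for the error message.
    image_property = system_metadata.get(f'image_{image_property}')
    if image_property:
        return image_property
    return f'Image property {image_property} not found'

def show_fixed(system_metadata, image_property):
    property_value = system_metadata.get(f'image_{image_property}')
    if property_value:
        return property_value
    return f'Image property {image_property} not found'

print(show_buggy({}, 'hw_machine_type'))   # Image property None not found
print(show_fixed({}, 'hw_machine_type'))   # Image property hw_machine_type not found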

nova/cmd/status.py

Lines changed: 2 additions & 2 deletions
@@ -266,7 +266,7 @@ def _check_machine_type_set(self):
 your environment does not contain libvirt based compute hosts.
 Use the `nova-manage machine_type list_unset` command to list these instances.
 For more details see the following:
-https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
+https://docs.openstack.org/nova/latest/admin/hw-machine-type.html"""))
             return upgradecheck.Result(upgradecheck.Code.WARNING, msg)
 
         return upgradecheck.Result(upgradecheck.Code.SUCCESS)
@@ -276,7 +276,7 @@ def _check_service_user_token(self):
             msg = (_("""
 Service user token configuration is required for all Nova services.
 For more details see the following:
-https://docs.openstack.org/latest/nova/admin/configuration/service-user-token.html""")) # noqa
+https://docs.openstack.org/nova/latest/admin/configuration/service-user-token.html""")) # noqa
             return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
         return upgradecheck.Result(upgradecheck.Code.SUCCESS)

nova/compute/manager.py

Lines changed: 5 additions & 1 deletion
@@ -9095,12 +9095,16 @@ def _live_migration_cleanup_flags(self, migrate_data, migr_ctxt=None):
                             objects.LibvirtVPMEMDevice)):
                         has_vpmem = True
                         break
+            power_management_possible = (
+                'dst_numa_info' in migrate_data and
+                migrate_data.dst_numa_info is not None)
             # No instance booting at source host, but instance dir
             # must be deleted for preparing next block migration
             # must be deleted for preparing next live migration w/o shared
             # storage
             # vpmem must be cleaned
-            do_cleanup = not migrate_data.is_shared_instance_path or has_vpmem
+            do_cleanup = (not migrate_data.is_shared_instance_path or
+                          has_vpmem or power_management_possible)
             destroy_disks = not migrate_data.is_shared_block_storage
         elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):
             # NOTE(claudiub): We need to cleanup any zombie Planned VM.
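
The new flag widens the cleanup condition: even with a shared instance path, the source host is cleaned up when the migration carries destination NUMA info, since dedicated CPUs there may need their power management state re-applied. A simplified sketch of the decision, using a plain dict in place of Nova's LibvirtLiveMigrateData object:

def needs_cleanup(migrate_data, has_vpmem=False):
    # migrate_data is a plain dict standing in for the versioned object.
    power_management_possible = migrate_data.get('dst_numa_info') is not None
    return (not migrate_data['is_shared_instance_path'] or has_vpmem or
            power_management_possible)

# Shared instance path, no NUMA info: no source cleanup needed.
print(needs_cleanup({'is_shared_instance_path': True}))                    # False
# NUMA info present: clean up anyway so pinned, power-managed CPUs are reset.
print(needs_cleanup({'is_shared_instance_path': True,
                     'dst_numa_info': {'cpus': [0, 1]}}))                  # True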

nova/network/neutron.py

Lines changed: 9 additions & 16 deletions
@@ -3610,6 +3610,7 @@ def _get_subnets_from_port(self, context, port, client=None):
                            'gateway': network_model.IP(
                                address=subnet['gateway_ip'],
                                type='gateway'),
+                           'enable_dhcp': False,
                            }
             if subnet.get('ipv6_address_mode'):
                 subnet_dict['ipv6_address_mode'] = subnet['ipv6_address_mode']
@@ -3626,22 +3627,14 @@ def _get_subnets_from_port(self, context, port, client=None):
                         subnet_dict['dhcp_server'] = ip_pair['ip_address']
                         break
 
-            # NOTE(arnaudmorin): If enable_dhcp is set on subnet, but, for
-            # some reason neutron did not have any DHCP port yet, we still
-            # want the network_info to be populated with a valid dhcp_server
-            # value. This is mostly useful for the metadata API (which is
-            # relying on this value to give network_data to the instance).
-            #
-            # This will also help some providers which are using external
-            # DHCP servers not handled by neutron.
-            # In this case, neutron will never create any DHCP port in the
-            # subnet.
-            #
-            # Also note that we cannot set the value to None because then the
-            # value would be discarded by the metadata API.
-            # So the subnet gateway will be used as fallback.
-            if subnet.get('enable_dhcp') and 'dhcp_server' not in subnet_dict:
-                subnet_dict['dhcp_server'] = subnet['gateway_ip']
+            # NOTE(stblatzheim): If enable_dhcp is set on subnet, but subnet
+            # has ovn native dhcp and no dhcp-agents. Network owner will be
+            # network:distributed
+            # Just rely on enable_dhcp flag given by neutron
+            # Fix for https://bugs.launchpad.net/nova/+bug/2055245
+
+            if subnet.get('enable_dhcp'):
+                subnet_dict['enable_dhcp'] = True
 
             subnet_object = network_model.Subnet(**subnet_dict)
             for dns in subnet.get('dns_nameservers', []):
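
With this change the subnet's DHCP flag is taken directly from Neutron rather than being inferred from the presence of a DHCP port, which matters for OVN native DHCP where no DHCP agent port exists. A compressed sketch of the data flow, using plain dicts instead of Nova's network_model objects:

def build_subnet_dict(neutron_subnet):
    subnet_dict = {
        'cidr': neutron_subnet['cidr'],
        'enable_dhcp': False,  # default added in the first hunk
    }
    if neutron_subnet.get('enable_dhcp'):
        # Trust Neutron's flag even when no DHCP port was found.
        subnet_dict['enable_dhcp'] = True
    return subnet_dict

print(build_subnet_dict({'cidr': '10.0.0.0/24', 'enable_dhcp': True}))
print(build_subnet_dict({'cidr': '10.0.0.0/24', 'enable_dhcp': False}))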

nova/objects/instance_numa.py

Lines changed: 6 additions & 0 deletions
@@ -293,6 +293,12 @@ def cpu_pinning(self):
             cell.cpu_pinning.values() for cell in self.cells
             if cell.cpu_pinning]))
 
+    @property
+    def cpuset_reserved(self):
+        return set(itertools.chain.from_iterable([
+            cell.cpuset_reserved for cell in self.cells
+            if cell.cpuset_reserved]))
+
     def clear_host_pinning(self):
         """Clear any data related to how instance is pinned to the host.
nova/scheduler/utils.py

Lines changed: 11 additions & 0 deletions
@@ -1080,6 +1080,17 @@ def validate_weigher(weigher):
 _SUPPORTS_SOFT_ANTI_AFFINITY = None
 
 
+def reset_globals():
+    global _SUPPORTS_AFFINITY
+    _SUPPORTS_AFFINITY = None
+    global _SUPPORTS_ANTI_AFFINITY
+    _SUPPORTS_ANTI_AFFINITY = None
+    global _SUPPORTS_SOFT_AFFINITY
+    _SUPPORTS_SOFT_AFFINITY = None
+    global _SUPPORTS_SOFT_ANTI_AFFINITY
+    _SUPPORTS_SOFT_ANTI_AFFINITY = None
+
+
 def _get_group_details(context, instance_uuid, user_group_hosts=None):
     """Provide group_hosts and group_policies sets related to instances if
     those instances are belonging to a group and if corresponding filters are
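
These module-level globals cache whether the affinity filters are enabled, so a test that changes enabled_filters can leave stale answers behind for every later test. A toy illustration of that leak and of what a reset hook fixes (simplified caching logic, not the actual Nova filter-validation code):

_SUPPORTS_AFFINITY = None

def supports_affinity(enabled_filters):
    global _SUPPORTS_AFFINITY
    if _SUPPORTS_AFFINITY is None:
        _SUPPORTS_AFFINITY = 'ServerGroupAffinityFilter' in enabled_filters
    return _SUPPORTS_AFFINITY

def reset_globals():
    global _SUPPORTS_AFFINITY
    _SUPPORTS_AFFINITY = None

print(supports_affinity([]))                             # False, now cached
print(supports_affinity(['ServerGroupAffinityFilter']))  # still False: stale cache
reset_globals()
print(supports_affinity(['ServerGroupAffinityFilter']))  # True after the reset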

nova/test.py

Lines changed: 14 additions & 0 deletions
@@ -62,6 +62,7 @@
 from nova.objects import base as objects_base
 from nova import quota
 from nova.scheduler.client import report
+from nova.scheduler import utils as scheduler_utils
 from nova.tests import fixtures as nova_fixtures
 from nova.tests.unit import matchers
 from nova import utils
@@ -310,6 +311,19 @@ def setUp(self):
         if self.STUB_COMPUTE_ID:
             self.useFixture(nova_fixtures.ComputeNodeIdFixture())
 
+        # Reset globals indicating affinity filter support. Some tests may set
+        # self.flags(enabled_filters=...) which could make the affinity filter
+        # support globals get set to a non-default configuration which affects
+        # all other tests.
+        scheduler_utils.reset_globals()
+
+        # Wait for bare greenlets spawn_n()'ed from a GreenThreadPoolExecutor
+        # to finish before moving on from the test. When greenlets from a
+        # previous test remain running, they may attempt to access structures
+        # (like the database) that have already been torn down and can cause
+        # the currently running test to fail.
+        self.useFixture(nova_fixtures.GreenThreadPoolShutdownWait())
+
     def _setup_cells(self):
         """Setup a normal cellsv2 environment.

nova/tests/fixtures/filesystem.py

Lines changed: 13 additions & 6 deletions
@@ -39,10 +39,14 @@ def _setUp(self):
 
 
 class SysFileSystemFixture(TempFileSystemFixture):
-    """Creates a fake /sys filesystem"""
+    def __init__(self, cpus_supported=None, cpufreq_enabled=True):
+        """Instantiates a fake sysfs.
 
-    def __init__(self, cpus_supported=None):
+        :param cpus_supported: number of devices/system/cpu (default: 10)
+        :param cpufreq_enabled: cpufreq subdir created (default: True)
+        """
         self.cpus_supported = cpus_supported or 10
+        self.cpufreq_enabled = cpufreq_enabled
 
     def _setUp(self):
         super()._setUp()
@@ -73,9 +77,12 @@ def _setUp(self):
 
         for cpu_nr in range(self.cpus_supported):
             cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr})
-            os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
-            filesystem.write_sys(
-                os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
-                data='powersave')
+            os.makedirs(cpu_dir)
+            filesystem.write_sys(os.path.join(cpu_dir, 'online'), data='1')
+            if self.cpufreq_enabled:
+                os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
+                filesystem.write_sys(
+                    os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
+                    data='powersave')
         filesystem.write_sys(core.AVAILABLE_PATH,
                              f'0-{self.cpus_supported - 1}')
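
The new cpufreq_enabled flag lets a test build the fake sysfs without the cpufreq subtree, matching platforms that lack scaling-governor support. A hedged usage sketch (the import path follows this diff; the test base class is only an assumption):

from nova import test
from nova.tests.fixtures import filesystem as fs_fixtures

class TestWithoutCpufreq(test.NoDBTestCase):
    def setUp(self):
        super().setUp()
        # Fake sysfs with 4 CPUs and no cpufreq/ directories.
        self.useFixture(fs_fixtures.SysFileSystemFixture(
            cpus_supported=4, cpufreq_enabled=False))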

nova/tests/fixtures/nova.py

Lines changed: 35 additions & 0 deletions
@@ -1938,3 +1938,38 @@ def setUp(self):
             'nova.compute.manager.ComputeManager.'
             '_ensure_existing_node_identity',
             mock.DEFAULT))
+
+
+class GreenThreadPoolShutdownWait(fixtures.Fixture):
+    """Always wait for greenlets in greenpool to finish.
+
+    We use the futurist.GreenThreadPoolExecutor, for example, in compute
+    manager to run live migration jobs. It runs those jobs in bare greenlets
+    created by eventlet.spawn_n(). Bare greenlets cannot be killed the same
+    way as GreenThreads created by eventlet.spawn().
+
+    Because they cannot be killed, in the test environment we must either let
+    them run to completion or move on while they are still running (which can
+    cause test failures as the leaked greenlets attempt to access structures
+    that have already been torn down).
+
+    When a compute service is stopped by Service.stop(), the compute manager's
+    cleanup_host() method is called and while cleaning up, the compute manager
+    calls the GreenThreadPoolExecutor.shutdown() method with wait=False. This
+    means that a test running GreenThreadPoolExecutor jobs will not wait for
+    the bare greenlets to finish running -- it will instead move on immediately
+    while greenlets are still running.
+
+    This fixture will ensure GreenThreadPoolExecutor.shutdown() is always
+    called with wait=True in an effort to reduce the number of leaked bare
+    greenlets.
+
+    See https://bugs.launchpad.net/nova/+bug/1946339 for details.
+    """
+
+    def setUp(self):
+        super().setUp()
+        real_shutdown = futurist.GreenThreadPoolExecutor.shutdown
+        self.useFixture(fixtures.MockPatch(
+            'futurist.GreenThreadPoolExecutor.shutdown',
+            lambda self, wait: real_shutdown(self, wait=True)))
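
The fixture works by shadowing the wait argument: whatever the caller passes, the wrapped shutdown() runs with wait=True so submitted greenlets finish before the test moves on. A standalone illustration of that trick (assumes the futurist library is available; this is not the fixture itself):

import futurist

real_shutdown = futurist.GreenThreadPoolExecutor.shutdown
futurist.GreenThreadPoolExecutor.shutdown = (
    lambda self, wait=True: real_shutdown(self, wait=True))

executor = futurist.GreenThreadPoolExecutor(max_workers=2)
executor.submit(lambda: print('job ran'))
executor.shutdown(wait=False)  # forced to wait=True by the wrapper above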
