
Commit 21dc96d

Merge pull request #108 from stackhpc/upstream/zed-2024-01-08
Synchronise zed with upstream
2 parents 9c7e482 + 4575136 commit 21dc96d

13 files changed: +200 -29 lines changed

neutron/agent/metadata/driver.py

Lines changed: 7 additions & 1 deletion
@@ -294,7 +294,13 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
         pm = cls._get_metadata_proxy_process_manager(uuid, conf,
                                                      ns_name=ns_name,
                                                      callback=callback)
-        pm.enable()
+        try:
+            pm.enable()
+        except exceptions.ProcessExecutionError as exec_err:
+            LOG.error("Encountered process execution error %(err)s while "
+                      "starting process in namespace %(ns)s",
+                      {"err": exec_err, "ns": ns_name})
+            return
         monitor.register(uuid, METADATA_SERVICE_NAME, pm)
         cls.monitors[router_id] = pm
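In effect, a failure to start haproxy is now logged and swallowed instead of crashing the agent, and the dead proxy is never registered with the process monitor. A minimal sketch of the pattern, with the exception class and surrounding objects stubbed out rather than imported from neutron:

```python
import logging

LOG = logging.getLogger(__name__)


class ProcessExecutionError(Exception):
    """Stand-in for neutron_lib.exceptions.ProcessExecutionError."""


def spawn_monitored(pm, monitor, uuid, ns_name, service_name='metadata'):
    # Mirrors the diff above: a spawn failure is logged and the function
    # bails out before the monitor ever learns about the dead process.
    try:
        pm.enable()
    except ProcessExecutionError as exec_err:
        LOG.error("Encountered process execution error %(err)s while "
                  "starting process in namespace %(ns)s",
                  {"err": exec_err, "ns": ns_name})
        return
    monitor.register(uuid, service_name, pm)
```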

neutron/agent/ovn/metadata/driver.py

Lines changed: 7 additions & 1 deletion
@@ -205,7 +205,13 @@ def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
         pm = cls._get_metadata_proxy_process_manager(uuid, conf,
                                                      ns_name=ns_name,
                                                      callback=callback)
-        pm.enable()
+        try:
+            pm.enable()
+        except exceptions.ProcessExecutionError as exec_err:
+            LOG.error("Encountered process execution error %(err)s while "
+                      "starting process in namespace %(ns)s",
+                      {"err": exec_err, "ns": ns_name})
+            return
         monitor.register(uuid, METADATA_SERVICE_NAME, pm)
         cls.monitors[router_id] = pm

neutron/api/rpc/handlers/securitygroups_rpc.py

Lines changed: 7 additions & 3 deletions
@@ -431,6 +431,10 @@ def _select_sg_ids_for_ports(self, context, ports):
                       for sg_id in p['security_group_ids']))
         return [(sg_id, ) for sg_id in sg_ids]

-    def _is_security_group_stateful(self, context, sg_id):
-        sg = self.rcache.get_resource_by_id(resources.SECURITYGROUP, sg_id)
-        return sg.stateful
+    def _get_sgs_stateful_flag(self, context, sg_ids):
+        sgs_stateful = {}
+        for sg_id in sg_ids:
+            sg = self.rcache.get_resource_by_id(resources.SECURITYGROUP, sg_id)
+            sgs_stateful[sg_id] = sg.stateful
+
+        return sgs_stateful
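The handler now returns a mapping rather than a single boolean, so callers can resolve many groups at once. A hedged sketch of the contract (cache contents invented for illustration):

```python
# Hypothetical cache state, one entry per security group ID:
resource_cache = {'sg-1': {'stateful': True}, 'sg-2': {'stateful': False}}


def get_sgs_stateful_flag(sg_ids):
    # Same contract as the new method: {sg_id: stateful_bool, ...}
    return {sg_id: resource_cache[sg_id]['stateful'] for sg_id in sg_ids}


print(get_sgs_stateful_flag(['sg-1', 'sg-2']))
# {'sg-1': True, 'sg-2': False}
```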

neutron/db/securitygroups_rpc_base.py

Lines changed: 15 additions & 10 deletions
@@ -211,12 +211,10 @@ def security_group_info_for_ports(self, context, ports):
                 # this set will be serialized into a list by rpc code
                 remote_address_group_info[remote_ag_id][ethertype] = set()
             direction = rule_in_db['direction']
-            stateful = self._is_security_group_stateful(context,
-                                                        security_group_id)
             rule_dict = {
                 'direction': direction,
                 'ethertype': ethertype,
-                'stateful': stateful}
+            }

             for key in ('protocol', 'port_range_min', 'port_range_max',
                         'remote_ip_prefix', 'remote_group_id',

@@ -234,6 +232,13 @@ def security_group_info_for_ports(self, context, ports):
             if rule_dict not in sg_info['security_groups'][security_group_id]:
                 sg_info['security_groups'][security_group_id].append(
                     rule_dict)
+
+        # Populate the security group "stateful" flag in the SGs list of rules.
+        for sg_id, stateful in self._get_sgs_stateful_flag(
+                context, sg_info['security_groups'].keys()).items():
+            for rule in sg_info['security_groups'][sg_id]:
+                rule['stateful'] = stateful
+
         # Update the security groups info if they don't have any rules
         sg_ids = self._select_sg_ids_for_ports(context, ports)
         for (sg_id, ) in sg_ids:

@@ -427,13 +432,13 @@ def _select_sg_ids_for_ports(self, context, ports):
         """
         raise NotImplementedError()

-    def _is_security_group_stateful(self, context, sg_id):
-        """Return whether the security group is stateful or not.
+    def _get_sgs_stateful_flag(self, context, sg_ids):
+        """Return the security groups stateful flag.

-        Return True if the security group associated with the given ID
-        is stateful, else False.
+        Returns a dictionary with the SG ID as key and the stateful flag:
+        {sg_1: True, sg_2: False, ...}
         """
-        return True
+        raise NotImplementedError()


 class SecurityGroupServerRpcMixin(SecurityGroupInfoAPIMixin,

@@ -530,5 +535,5 @@ def _select_ips_for_remote_address_group(self, context,
         return ips_by_group

     @db_api.retry_if_session_inactive()
-    def _is_security_group_stateful(self, context, sg_id):
-        return sg_obj.SecurityGroup.get_sg_by_id(context, sg_id).stateful
+    def _get_sgs_stateful_flag(self, context, sg_ids):
+        return sg_obj.SecurityGroup.get_sgs_stateful_flag(context, sg_ids)
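The net effect: the stateful lookup moves out of the per-rule loop and happens once per security group, after the rules are built. A toy rendering of the new post-processing step, with data shapes assumed from the diff:

```python
security_groups = {
    'sg-1': [{'direction': 'ingress', 'ethertype': 'IPv4'}],
    'sg-2': [{'direction': 'egress', 'ethertype': 'IPv6'}],
}
stateful_flags = {'sg-1': True, 'sg-2': False}  # as _get_sgs_stateful_flag()

# Stamp each group's flag onto all of its rules in one pass.
for sg_id, stateful in stateful_flags.items():
    for rule in security_groups[sg_id]:
        rule['stateful'] = stateful

assert security_groups['sg-2'][0]['stateful'] is False
```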

neutron/objects/securitygroup.py

Lines changed: 7 additions & 0 deletions
@@ -133,6 +133,13 @@ def get_bound_project_ids(cls, context, obj_id):
             security_group_ids=[obj_id])
         return {port.project_id for port in port_objs}

+    @classmethod
+    @db_api.CONTEXT_READER
+    def get_sgs_stateful_flag(cls, context, sg_ids):
+        query = context.session.query(cls.db_model.id, cls.db_model.stateful)
+        query = query.filter(cls.db_model.id.in_(sg_ids))
+        return dict(query.all())
+

 @base.NeutronObjectRegistry.register
 class DefaultSecurityGroup(base.NeutronDbObject):
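For reference, the dict(query.all()) idiom can be reproduced outside neutron. A minimal sketch with a toy SQLAlchemy model (in-memory SQLite, invented table name), selecting two columns and building an {id: flag} mapping in one round trip:

```python
from sqlalchemy import Boolean, Column, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class SecurityGroup(Base):
    __tablename__ = 'securitygroups'
    id = Column(String, primary_key=True)
    stateful = Column(Boolean, default=True)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([SecurityGroup(id='sg-1', stateful=True),
                     SecurityGroup(id='sg-2', stateful=False)])
    session.commit()
    # One query for the whole batch: (id, stateful) pairs -> dict.
    query = session.query(SecurityGroup.id, SecurityGroup.stateful)
    query = query.filter(SecurityGroup.id.in_(['sg-1', 'sg-2']))
    print(dict(query.all()))  # {'sg-1': True, 'sg-2': False}
```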

neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py

Lines changed: 2 additions & 1 deletion
@@ -1351,7 +1351,8 @@ def sync_hostname_and_physical_networks(self, ctx):
         LOG.debug('OVN-SB Sync hostname and physical networks started')
         host_phynets_map = self.ovn_api.get_chassis_hostname_and_physnets()
         current_hosts = set(host_phynets_map)
-        previous_hosts = segments_db.get_hosts_mapped_with_segments(ctx)
+        previous_hosts = segments_db.get_hosts_mapped_with_segments(
+            ctx, include_agent_types={ovn_const.OVN_CONTROLLER_AGENT})

         stale_hosts = previous_hosts - current_hosts
         for host in stale_hosts:
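Only hosts known through an OVN controller agent are now candidates for stale-host cleanup; hosts mapped by other agent types survive the sync. A toy rendering of the set arithmetic, with invented hosts and agent names:

```python
OVN_CONTROLLER_AGENT = 'OVN Controller agent'  # illustrative constant

host_agents = {'host1': OVN_CONTROLLER_AGENT,
               'host2': OVN_CONTROLLER_AGENT,
               'host3': 'Open vSwitch agent'}

current_hosts = {'host2'}  # chassis actually present in the OVN SB DB
previous_hosts = {h for h, agent in host_agents.items()
                  if agent == OVN_CONTROLLER_AGENT}

stale_hosts = previous_hosts - current_hosts
print(stale_hosts)  # {'host1'} -- host3 is never a cleanup candidate
```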

neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py

Lines changed: 50 additions & 0 deletions
@@ -1818,6 +1818,22 @@ def setUp(self):
     def _sync_resources(self):
         self.sb_synchronizer.sync_hostname_and_physical_networks(self.ctx)

+    def create_agent(self, host, bridge_mappings=None, agent_type=None):
+        if agent_type is None:
+            agent_type = ovn_const.OVN_CONTROLLER_AGENT
+        if bridge_mappings is None:
+            bridge_mappings = {}
+        agent = {
+            'host': host,
+            'agent_type': agent_type,
+            'binary': '/bin/test',
+            'topic': 'test_topic',
+            'configurations': {'bridge_mappings': bridge_mappings}
+        }
+        _, status = self.plugin.create_or_update_agent(self.context, agent)
+
+        return status['id']
+
     def create_segment(self, network_id, physical_network, segmentation_id):
         segment_data = {'network_id': network_id,
                         'physical_network': physical_network,

@@ -1858,6 +1874,7 @@ def test_ovn_sb_sync_delete_stale_host(self):
             segment = self.create_segment(network_id, 'physnet1', 50)
             segments_db.update_segment_host_mapping(
                 self.ctx, 'host1', {segment['id']})
+            _ = self.create_agent('host1', bridge_mappings={'physnet1': 'eth0'})
             segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx)
             self.assertEqual({'host1'}, segment_hosts)
             # Since there is no chassis in the sb DB, host1 is the stale host

@@ -1866,6 +1883,36 @@ def test_ovn_sb_sync_delete_stale_host(self):
             segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx)
             self.assertFalse(segment_hosts)

+    def test_ovn_sb_sync_host_with_no_agent_not_deleted(self):
+        with self.network() as network:
+            network_id = network['network']['id']
+            segment = self.create_segment(network_id, 'physnet1', 50)
+            segments_db.update_segment_host_mapping(
+                self.ctx, 'host1', {segment['id']})
+            _ = self.create_agent('host1', bridge_mappings={'physnet1': 'eth0'},
+                                  agent_type="Not OVN Agent")
+            segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx)
+            self.assertEqual({'host1'}, segment_hosts)
+            # There is no chassis in the sb DB, host1 does not have an agent
+            # so it is not deleted.
+            self._sync_resources()
+            segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx)
+            self.assertEqual({'host1'}, segment_hosts)
+
+    def test_ovn_sb_sync_host_with_other_agent_type_not_deleted(self):
+        with self.network() as network:
+            network_id = network['network']['id']
+            segment = self.create_segment(network_id, 'physnet1', 50)
+            segments_db.update_segment_host_mapping(
+                self.ctx, 'host1', {segment['id']})
+            segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx)
+            self.assertEqual({'host1'}, segment_hosts)
+            # There is no chassis in the sb DB, host1 does not have an agent
+            # so it is not deleted.
+            self._sync_resources()
+            segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx)
+            self.assertEqual({'host1'}, segment_hosts)
+
     def test_ovn_sb_sync(self):
         with self.network() as network:
             network_id = network['network']['id']

@@ -1878,6 +1925,9 @@ def test_ovn_sb_sync(self):
             segments_db.update_segment_host_mapping(
                 self.ctx, 'host3', {seg1['id']})
             segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx)
+            _ = self.create_agent('host1')
+            _ = self.create_agent('host2', bridge_mappings={'physnet2': 'eth0'})
+            _ = self.create_agent('host3', bridge_mappings={'physnet3': 'eth0'})
             self.assertEqual({'host1', 'host2', 'host3'}, segment_hosts)
             self.add_fake_chassis('host2', ['physnet2'])
             self.add_fake_chassis('host3', ['physnet3'])

neutron/tests/unit/agent/dhcp/test_agent.py

Lines changed: 31 additions & 12 deletions
@@ -807,23 +807,37 @@ def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS):
     def _enable_dhcp_helper(self, network, enable_isolated_metadata=False,
                             is_isolated_network=False, is_ovn_network=False):
         self.dhcp._process_monitor = mock.Mock()
+        # The disable() call
+        gmppm_expected_calls = [mock.call(FAKE_NETWORK_UUID, cfg.CONF,
+                                          ns_name=FAKE_NETWORK_DHCP_NS)]
         if enable_isolated_metadata:
             cfg.CONF.set_override('enable_isolated_metadata', True)
+        if is_isolated_network:
+            # The enable() call
+            gmppm_expected_calls.append(
+                mock.call(FAKE_NETWORK_UUID, cfg.CONF,
+                          ns_name=FAKE_NETWORK_DHCP_NS,
+                          callback=mock.ANY))
         self.plugin.get_network_info.return_value = network
-        self.dhcp.enable_dhcp_helper(network.id)
+        process_instance = mock.Mock(active=False)
+        with mock.patch.object(metadata_driver.MetadataDriver,
+                               '_get_metadata_proxy_process_manager',
+                               return_value=process_instance) as gmppm:
+            self.dhcp.enable_dhcp_helper(network.id)
+        gmppm.assert_has_calls(gmppm_expected_calls)
         self.plugin.assert_has_calls([
             mock.call.get_network_info(network.id)])
         self.call_driver.assert_called_once_with('enable', network)
         self.cache.assert_has_calls([mock.call.put(network)])
         if (is_isolated_network and enable_isolated_metadata and not
                 is_ovn_network):
-            self.external_process.assert_has_calls([
-                self._process_manager_constructor_call(),
-                mock.call().enable()], any_order=True)
+            process_instance.assert_has_calls([
+                mock.call.disable(sig=str(int(signal.SIGTERM))),
+                mock.call.get_pid_file_name(),
+                mock.call.enable()])
         else:
-            self.external_process.assert_has_calls([
-                self._process_manager_constructor_call(),
-                mock.call().disable(sig=str(int(signal.SIGTERM)))])
+            process_instance.assert_has_calls([
+                mock.call.disable(sig=str(int(signal.SIGTERM)))])

     def test_enable_dhcp_helper_enable_metadata_isolated_network(self):
         self._enable_dhcp_helper(isolated_network,

@@ -997,11 +1011,16 @@ def test_disable_dhcp_helper_driver_failure(self):

     def test_enable_isolated_metadata_proxy(self):
         self.dhcp._process_monitor = mock.Mock()
-        self.dhcp.enable_isolated_metadata_proxy(fake_network)
-        self.external_process.assert_has_calls([
-            self._process_manager_constructor_call(),
-            mock.call().enable()
-        ], any_order=True)
+        process_instance = mock.Mock(active=False)
+        with mock.patch.object(metadata_driver.MetadataDriver,
+                               '_get_metadata_proxy_process_manager',
+                               return_value=process_instance) as gmppm:
+            self.dhcp.enable_isolated_metadata_proxy(fake_network)
+        gmppm.assert_called_with(FAKE_NETWORK_UUID,
+                                 cfg.CONF,
+                                 ns_name=FAKE_NETWORK_DHCP_NS,
+                                 callback=mock.ANY)
+        process_instance.enable.assert_called_once()

     def test_disable_isolated_metadata_proxy(self):
         method_path = ('neutron.agent.metadata.driver.MetadataDriver'

neutron/tests/unit/agent/metadata/test_driver.py

Lines changed: 21 additions & 0 deletions
@@ -18,6 +18,7 @@
 from unittest import mock

 from neutron_lib import constants
+from neutron_lib import exceptions as lib_exceptions
 from neutron_lib import fixture as lib_fixtures
 from oslo_config import cfg
 from oslo_utils import uuidutils

@@ -241,6 +242,26 @@ def test_spawn_metadata_proxy(self):
     def test_spawn_metadata_proxy_dad_failed(self):
         self._test_spawn_metadata_proxy(dad_failed=True)

+    @mock.patch.object(metadata_driver.LOG, 'error')
+    def test_spawn_metadata_proxy_handles_process_exception(self, error_log):
+        process_instance = mock.Mock(active=False)
+        process_instance.enable.side_effect = (
+            lib_exceptions.ProcessExecutionError('Something happened', -1))
+        with mock.patch.object(metadata_driver.MetadataDriver,
+                               '_get_metadata_proxy_process_manager',
+                               return_value=process_instance):
+            process_monitor = mock.Mock()
+            network_id = 123456
+            metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
+                process_monitor,
+                'dummy_namespace',
+                self.METADATA_PORT,
+                cfg.CONF,
+                network_id=network_id)
+        error_log.assert_called_once()
+        process_monitor.register.assert_not_called()
+        self.assertNotIn(network_id, metadata_driver.MetadataDriver.monitors)
+
     def test_create_config_file_wrong_user(self):
         with mock.patch('pwd.getpwnam', side_effect=KeyError):
             config = metadata_driver.HaproxyConfigurator(_uuid(),
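Both new unit tests lean on the same unittest.mock idiom: assigning side_effect to a mocked method makes the call raise. A self-contained illustration of the pattern, with toy names in place of neutron's fixtures:

```python
from unittest import mock


class Boom(Exception):
    """Toy stand-in for ProcessExecutionError."""


# Mock whose enable() raises, mirroring the failing process manager above.
pm = mock.Mock(active=False)
pm.enable.side_effect = Boom('Something happened')

monitor = mock.Mock()
try:
    pm.enable()
except Boom:
    pass  # the production code logs the error and returns here
monitor.register.assert_not_called()  # nothing gets registered on failure
```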

neutron/tests/unit/agent/ovn/metadata/test_driver.py

Lines changed: 24 additions & 0 deletions
@@ -16,6 +16,7 @@
 import os
 from unittest import mock

+from neutron_lib import exceptions as lib_exceptions
 from neutron_lib import fixture as lib_fixtures
 from oslo_config import cfg
 from oslo_utils import uuidutils

@@ -112,6 +113,29 @@ def test_spawn_metadata_proxy(self):
                 run_as_root=True)
         ])

+    @mock.patch.object(metadata_driver.LOG, 'error')
+    def test_spawn_metadata_proxy_handles_process_exception(self, error_log):
+        process_instance = mock.Mock(active=False)
+        process_instance.enable.side_effect = (
+            lib_exceptions.ProcessExecutionError('Something happened', -1))
+
+        with mock.patch.object(metadata_driver.MetadataDriver,
+                               '_get_metadata_proxy_process_manager',
+                               return_value=process_instance):
+            process_monitor = mock.Mock()
+            network_id = 123456
+
+            metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
+                process_monitor,
+                'dummy_namespace',
+                self.METADATA_PORT,
+                cfg.CONF,
+                network_id=network_id)
+
+        error_log.assert_called_once()
+        process_monitor.register.assert_not_called()
+        self.assertNotIn(network_id, metadata_driver.MetadataDriver.monitors)
+
     def test_create_config_file_wrong_user(self):
         with mock.patch('pwd.getpwnam', side_effect=KeyError):
             config = metadata_driver.HaproxyConfigurator(mock.ANY, mock.ANY,
