Skip to content

Commit 412deb3

Browse files
authored
Merge pull request #52 from stackhpc/upstream/yoga-2023-07-10
Synchronise yoga with upstream
2 parents 66ddd0b + 1b33822 commit 412deb3

File tree

15 files changed

+195
-31
lines changed

15 files changed

+195
-31
lines changed

neutron/agent/l3/dvr_local_router.py

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -47,6 +47,9 @@ def __init__(self, host, *args, **kwargs):
4747
self.rtr_fip_connect = False
4848
self.fip_ns = None
4949
self._pending_arp_set = set()
50+
51+
def initialize(self, process_monitor):
52+
super().initialize(process_monitor)
5053
self._load_used_fip_information()
5154

5255
def _load_used_fip_information(self):

neutron/api/rpc/handlers/securitygroups_rpc.py

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -303,6 +303,13 @@ def _clear_child_sg_rules(self, rtype, event, trigger, payload):
303303
for rule in self.rcache.get_resources('SecurityGroupRule', filters):
304304
self.rcache.record_resource_delete(context, 'SecurityGroupRule',
305305
rule.id)
306+
# If there's a rule whose remote is the deleted sg, remove that also.
307+
rules = self.rcache.match_resources_with_func(
308+
'SecurityGroupRule',
309+
lambda sg_rule: sg_rule.remote_group_id == existing.id)
310+
for rule in rules:
311+
self.rcache.record_resource_delete(context, 'SecurityGroupRule',
312+
rule.id)
306313

307314
def _handle_sg_rule_delete(self, rtype, event, trigger, payload):
308315
existing = payload.states[0]

neutron/common/ovn/constants.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -51,6 +51,7 @@
5151
OVN_LIVENESS_CHECK_EXT_ID_KEY = 'neutron:liveness_check_at'
5252
METADATA_LIVENESS_CHECK_EXT_ID_KEY = 'neutron:metadata_liveness_check_at'
5353
OVN_PORT_BINDING_PROFILE = portbindings.PROFILE
54+
OVN_HOST_ID_EXT_ID_KEY = 'neutron:host_id'
5455

5556
MIGRATING_ATTR = 'migrating_to'
5657
OVN_ROUTER_PORT_OPTION_KEYS = ['router-port', 'nat-addresses',

neutron/db/l3_db.py

Lines changed: 0 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -1730,15 +1730,6 @@ def prevent_l3_port_deletion(self, context, port_id, port=None):
17301730
return
17311731
if port['device_owner'] not in self.router_device_owners:
17321732
return
1733-
# Raise port in use only if the port has IP addresses
1734-
# Otherwise it's a stale port that can be removed
1735-
fixed_ips = port['fixed_ips']
1736-
if not fixed_ips:
1737-
LOG.debug("Port %(port_id)s has owner %(port_owner)s, but "
1738-
"no IP address, so it can be deleted",
1739-
{'port_id': port['id'],
1740-
'port_owner': port['device_owner']})
1741-
return
17421733
# NOTE(kevinbenton): we also check to make sure that the
17431734
# router still exists. It's possible for HA router interfaces
17441735
# to remain after the router is deleted if they encounter an

neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py

Lines changed: 10 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1040,7 +1040,7 @@ def get_workers(self):
10401040
# See doc/source/design/ovn_worker.rst for more details.
10411041
return [worker.MaintenanceWorker()]
10421042

1043-
def _update_dnat_entry_if_needed(self, port_id, up=True):
1043+
def _update_dnat_entry_if_needed(self, port_id):
10441044
"""Update DNAT entry if using distributed floating ips."""
10451045
if not self.nb_ovn:
10461046
self.nb_ovn = self._ovn_client._nb_idl
@@ -1061,9 +1061,9 @@ def _update_dnat_entry_if_needed(self, port_id, up=True):
10611061
{ovn_const.OVN_FIP_EXT_MAC_KEY:
10621062
nat['external_mac']})).execute()
10631063

1064-
if up and ovn_conf.is_ovn_distributed_floating_ip():
1065-
mac = nat['external_ids'][ovn_const.OVN_FIP_EXT_MAC_KEY]
1066-
if nat['external_mac'] != mac:
1064+
if ovn_conf.is_ovn_distributed_floating_ip():
1065+
mac = nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY)
1066+
if mac and nat['external_mac'] != mac:
10671067
LOG.debug("Setting external_mac of port %s to %s",
10681068
port_id, mac)
10691069
self.nb_ovn.db_set(
@@ -1118,6 +1118,8 @@ def set_port_status_up(self, port_id):
11181118
const.PORT_STATUS_ACTIVE)
11191119
elif self._should_notify_nova(db_port):
11201120
self._plugin.nova_notifier.notify_port_active_direct(db_port)
1121+
1122+
self._ovn_client.update_lsp_host_info(admin_context, db_port)
11211123
except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
11221124
LOG.debug('Port not found during OVN status up report: %s',
11231125
port_id)
@@ -1129,7 +1131,7 @@ def set_port_status_down(self, port_id):
11291131
# to prevent another entity from bypassing the block with its own
11301132
# port status update.
11311133
LOG.info("OVN reports status down for port: %s", port_id)
1132-
self._update_dnat_entry_if_needed(port_id, False)
1134+
self._update_dnat_entry_if_needed(port_id)
11331135
admin_context = n_context.get_admin_context()
11341136
try:
11351137
db_port = ml2_db.get_port(admin_context, port_id)
@@ -1146,6 +1148,9 @@ def set_port_status_down(self, port_id):
11461148
None)
11471149
self._plugin.nova_notifier.send_port_status(
11481150
None, None, db_port)
1151+
1152+
self._ovn_client.update_lsp_host_info(
1153+
admin_context, db_port, up=False)
11491154
except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
11501155
LOG.debug("Port not found during OVN status down report: %s",
11511156
port_id)

neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -124,6 +124,7 @@ def run_idl(self, txn):
124124
'Logical_Switch_Port', 'name',
125125
self.lport, None)
126126
if port:
127+
self.result = port.uuid
127128
return
128129

129130
port = txn.insert(self.api._tables['Logical_Switch_Port'])

neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py

Lines changed: 36 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -271,6 +271,36 @@ def determine_bind_host(self, port, port_context=None):
271271
ovn_const.VIF_DETAILS_CARD_SERIAL_NUMBER]).hostname
272272
return ''
273273

274+
def update_lsp_host_info(self, context, db_port, up=True):
275+
"""Update the binding hosting information for the LSP.
276+
277+
Update the binding hosting information in the Logical_Switch_Port
278+
external_ids column. See LP #2020058 for more information.
279+
280+
:param context: Neutron API context.
281+
:param db_port: The Neutron port.
282+
:param up: If True add the host information, if False remove it.
283+
Defaults to True.
284+
"""
285+
cmd = []
286+
if up:
287+
if not db_port.port_bindings:
288+
return
289+
host = db_port.port_bindings[0].host
290+
291+
ext_ids = ('external_ids',
292+
{ovn_const.OVN_HOST_ID_EXT_ID_KEY: host})
293+
cmd.append(
294+
self._nb_idl.db_set(
295+
'Logical_Switch_Port', db_port.id, ext_ids))
296+
else:
297+
cmd.append(
298+
self._nb_idl.db_remove(
299+
'Logical_Switch_Port', db_port.id, 'external_ids',
300+
ovn_const.OVN_HOST_ID_EXT_ID_KEY, if_exists=True))
301+
302+
self._transaction(cmd)
303+
274304
def _get_port_options(self, port):
275305
context = n_context.get_admin_context()
276306
binding_prof = utils.validate_and_get_data_from_binding_profile(port)
@@ -426,7 +456,12 @@ def _get_port_options(self, port):
426456
# a RARP packet from it to inform network about the new
427457
# location of the port
428458
options['activation-strategy'] = 'rarp'
429-
options[ovn_const.LSP_OPTIONS_REQUESTED_CHASSIS_KEY] = chassis
459+
460+
# Virtual ports can not be bound by using the
461+
# requested-chassis mechanism, ovn-controller will create the
462+
# Port_Binding entry when it sees an ARP coming from the VIP
463+
if port_type != ovn_const.LSP_TYPE_VIRTUAL:
464+
options[ovn_const.LSP_OPTIONS_REQUESTED_CHASSIS_KEY] = chassis
430465

431466
# TODO(lucasagomes): Enable the mcast_flood_reports by default,
432467
# according to core OVN developers it shouldn't cause any harm

neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py

Lines changed: 18 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -66,3 +66,21 @@ def check_metadata_port(enable_dhcp):
6666
# because it checks first if the metadata port exists.
6767
ovn_client.create_metadata_port(self.context, network)
6868
check_metadata_port(enable_dhcp)
69+
70+
def test_create_port(self):
71+
with self.network('test-ovn-client') as net:
72+
with self.subnet(net) as subnet:
73+
with self.port(subnet) as port:
74+
port_data = port['port']
75+
nb_ovn = self.mech_driver.nb_ovn
76+
lsp = nb_ovn.lsp_get(port_data['id']).execute()
77+
# The logical switch port has been created during the
78+
# port creation.
79+
self.assertIsNotNone(lsp)
80+
ovn_client = self.mech_driver._ovn_client
81+
port_data = self.plugin.get_port(self.context,
82+
port_data['id'])
83+
# Call the create_port again to ensure that the create
84+
# command automatically checks for existing logical
85+
# switch ports
86+
ovn_client.create_port(self.context, port_data)

neutron/tests/unit/agent/l3/test_dvr_local_router.py

Lines changed: 9 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -287,6 +287,15 @@ def test__load_used_fip_information(self, mock_add_ip_rule):
287287
self.assertEqual(sorted(ret, key=lambda ret: ret[0]),
288288
fip_rule_prio_list)
289289

290+
@mock.patch.object(router_info.RouterInfo, 'initialize')
291+
def test_initialize_dvr_local_router(self, super_initialize):
292+
ri = self._create_router()
293+
self.mock_load_fip.assert_not_called()
294+
295+
ri.initialize(self.process_monitor)
296+
super_initialize.assert_called_once_with(self.process_monitor)
297+
self.mock_load_fip.assert_called_once()
298+
290299
def test_get_floating_ips_dvr(self):
291300
router = mock.MagicMock()
292301
router.get.return_value = [{'host': HOSTNAME},

neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py

Lines changed: 49 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -15,6 +15,7 @@
1515
from unittest import mock
1616

1717
import netaddr
18+
from neutron_lib.callbacks import events
1819
from neutron_lib import context
1920
from oslo_utils import uuidutils
2021

@@ -120,13 +121,14 @@ def _make_address_group_ovo(self, *args, **kwargs):
120121
return_value=False)
121122
def _make_security_group_ovo(self, *args, **kwargs):
122123
attrs = {'id': uuidutils.generate_uuid(), 'revision_number': 1}
124+
r_group = kwargs.get('remote_group_id') or attrs['id']
123125
sg_rule = securitygroup.SecurityGroupRule(
124126
id=uuidutils.generate_uuid(),
125127
security_group_id=attrs['id'],
126128
direction='ingress',
127129
ethertype='IPv4', protocol='tcp',
128130
port_range_min=400,
129-
remote_group_id=attrs['id'],
131+
remote_group_id=r_group,
130132
revision_number=1,
131133
remote_address_group_id=kwargs.get('remote_address_group_id',
132134
None),
@@ -198,6 +200,52 @@ def test_sg_member_update_events(self):
198200
self.sg_agent.security_groups_member_updated.assert_called_with(
199201
{s1.id})
200202

203+
def test_sg_delete_events_with_remote(self):
204+
s1 = self._make_security_group_ovo(remote_group_id='')
205+
s2 = self._make_security_group_ovo(remote_group_id=s1.id)
206+
rules = self.rcache.get_resources(
207+
'SecurityGroupRule',
208+
filters={'security_group_id': (s1.id, s2.id)})
209+
self.assertEqual(2, len(rules))
210+
self.assertEqual(s1.id, rules[0].remote_group_id)
211+
212+
self.shim._clear_child_sg_rules(
213+
'SecurityGroup', 'after_delete', '',
214+
events.DBEventPayload(
215+
context=self.ctx,
216+
states=[s1]
217+
)
218+
)
219+
rules = self.rcache.get_resources(
220+
'SecurityGroupRule',
221+
filters={'security_group_id': (s1.id, s2.id)})
222+
self.assertEqual(0, len(rules))
223+
224+
def test_sg_delete_events_without_remote(self):
225+
s1 = self._make_security_group_ovo()
226+
s2 = self._make_security_group_ovo()
227+
rules = self.rcache.get_resources(
228+
'SecurityGroupRule',
229+
filters={'security_group_id': (s1.id, s2.id)})
230+
self.assertEqual(2, len(rules))
231+
self.assertEqual(s1.id, rules[0].remote_group_id)
232+
233+
self.shim._clear_child_sg_rules(
234+
'SecurityGroup', 'after_delete', '',
235+
events.DBEventPayload(
236+
context=self.ctx,
237+
states=[s1]
238+
)
239+
)
240+
s1_rules = self.rcache.get_resources(
241+
'SecurityGroupRule',
242+
filters={'security_group_id': (s1.id, )})
243+
self.assertEqual(0, len(s1_rules))
244+
s2_rules = self.rcache.get_resources(
245+
'SecurityGroupRule',
246+
filters={'security_group_id': (s2.id, )})
247+
self.assertEqual(1, len(s2_rules))
248+
201249
def test_get_secgroup_ids_for_address_group(self):
202250
ag = self._make_address_group_ovo()
203251
sg1 = self._make_security_group_ovo(remote_address_group_id=ag.id)

0 commit comments

Comments (0)