Skip to content

Commit 00c0d19

Browse files
authored
Merge pull request #72 from stackhpc/upstream/yoga-2023-11-10
Synchronise yoga with upstream
2 parents d4e1df3 + cdaee88 commit 00c0d19

File tree

5 files changed

+73
-23
lines changed

5 files changed

+73
-23
lines changed

neutron/agent/ovn/metadata/agent.py

Lines changed: 30 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,13 +14,15 @@
1414

1515
import collections
1616
import functools
17+
from random import randint
1718
import re
1819
import threading
1920
import uuid
2021

2122
import netaddr
2223
from neutron_lib import constants as n_const
2324
from oslo_concurrency import lockutils
25+
from oslo_config import cfg
2426
from oslo_log import log
2527
from oslo_utils import netutils
2628
from ovsdbapp.backend.ovs_idl import event as row_event
@@ -35,10 +37,12 @@
3537
from neutron.common.ovn import constants as ovn_const
3638
from neutron.common.ovn import utils as ovn_utils
3739
from neutron.common import utils
40+
from neutron.conf.agent.database import agents_db
3841
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as config
3942

4043

4144
LOG = log.getLogger(__name__)
45+
agents_db.register_db_agents_opts()
4246
_SYNC_STATE_LOCK = lockutils.ReaderWriterLock()
4347
CHASSIS_METADATA_LOCK = 'chassis_metadata_lock'
4448

@@ -186,14 +190,34 @@ def __init__(self, metadata_agent):
186190
events = (self.ROW_UPDATE,)
187191
super(SbGlobalUpdateEvent, self).__init__(events, table, None)
188192
self.event_name = self.__class__.__name__
193+
self.first_run = True
189194

190195
def run(self, event, row, old):
191-
table = ('Chassis_Private' if self.agent.has_chassis_private
192-
else 'Chassis')
193-
self.agent.sb_idl.db_set(
194-
table, self.agent.chassis, ('external_ids', {
195-
ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY:
196-
str(row.nb_cfg)})).execute()
196+
197+
def _update_chassis(self, row):
198+
table = ('Chassis_Private' if self.agent.has_chassis_private
199+
else 'Chassis')
200+
self.agent.sb_idl.db_set(
201+
table, self.agent.chassis, ('external_ids', {
202+
ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY:
203+
str(row.nb_cfg)})).execute()
204+
205+
delay = 0
206+
if self.first_run:
207+
self.first_run = False
208+
else:
209+
# We occasionally see port binding failed errors due to
210+
# the ml2 driver refusing to bind the port to a dead agent.
211+
# if all agents heartbeat at the same time, they will all
212+
# cause a load spike on the server. To mitigate that we
213+
# need to spread out the load by introducing a random delay.
214+
# clamp the max delay between 3 and 10 seconds.
215+
max_delay = max(min(cfg.CONF.agent_down_time // 3, 10), 3)
216+
delay = randint(0, max_delay)
217+
218+
LOG.debug("Delaying updating chassis table for %s seconds", delay)
219+
timer = threading.Timer(delay, _update_chassis, [self, row])
220+
timer.start()
197221

198222

199223
class MetadataAgent(object):

neutron/db/db_base_plugin_v2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@
7575

7676

7777
def _ensure_subnet_not_used(context, subnet_id):
78-
models_v2.Subnet.lock_register(
78+
models_v2.Subnet.write_lock_register(
7979
context, exc.SubnetInUse(subnet_id=subnet_id), id=subnet_id)
8080
try:
8181
registry.publish(

neutron/db/ipam_backend_mixin.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -688,7 +688,7 @@ def _ipam_get_subnets(self, context, network_id, host, service_type=None,
688688
msg = ('This subnet is being modified by another concurrent '
689689
'operation')
690690
for subnet in subnets:
691-
subnet.lock_register(
691+
subnet.read_lock_register(
692692
context, exc.SubnetInUse(subnet_id=subnet.id, reason=msg),
693693
id=subnet.id)
694694
subnet_dicts = [self._make_subnet_dict(subnet, context=context)

neutron/db/models_v2.py

Lines changed: 39 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -33,25 +33,51 @@
3333
class HasInUse(object):
3434
"""NeutronBaseV2 mixin, to add the flag "in_use" to a DB model.
3535
36-
The content of this flag (boolean) parameter is not relevant. The goal of
37-
this field is to be used in a write transaction to mark a DB register as
38-
"in_use". Writing any value on this DB parameter will lock the container
39-
register. At the end of the DB transaction, the DB engine will check if
40-
this register was modified or deleted. In such case, the transaction will
41-
fail and won't be committed.
42-
43-
"lock_register" is the method to write the register "in_use" column.
44-
Because the lifespan of this DB lock is the DB transaction, there isn't an
45-
unlock method. The lock will finish once the transaction ends.
36+
The goal of this class is to allow users to lock specific database rows with
37+
a shared or exclusive lock (without necessarily introducing a change in
38+
the table itself). Having these locks allows the DB engine to prevent
39+
concurrent modifications (e.g. the deletion of a resource while we are
40+
currently adding a new dependency on the resource).
41+
42+
"read_lock_register" takes a shared DB lock on the row specified by the
43+
filters. The lock is automatically released once the transaction ends.
44+
You can have any number of parallel read locks on the same DB row. But
45+
you can not have any write lock in parallel.
46+
47+
"write_lock_register" takes an exclusive DB lock on the row specified by
48+
the filters. The lock is automatically released on transaction commit.
49+
You may only have one write lock on each row at a time. It therefore
50+
blocks all other read and write locks to this row.
4651
"""
52+
# keep this value to not need to update the database schema
53+
# only at backport
4754
in_use = sa.Column(sa.Boolean(), nullable=False,
4855
server_default=sql.false(), default=False)
4956

5057
@classmethod
51-
def lock_register(cls, context, exception, **filters):
58+
def write_lock_register(cls, context, exception, **filters):
59+
# we use `with_for_update()` to include `FOR UPDATE` in the sql
60+
# statement.
61+
# we need to set `enable_eagerloads(False)` so that we do not try to
62+
# load attached resources (e.g. standardattributes) as this breaks the
63+
# `FOR UPDATE` statement.
5264
num_reg = context.session.query(
53-
cls).filter_by(**filters).update({'in_use': True})
54-
if num_reg != 1:
65+
cls).filter_by(**filters).enable_eagerloads(
66+
False).with_for_update().first()
67+
if num_reg is None:
68+
raise exception
69+
70+
@classmethod
71+
def read_lock_register(cls, context, exception, **filters):
72+
# we use `with_for_update(read=True)` to include `LOCK IN SHARE MODE`
73+
# in the sql statement.
74+
# we need to set `enable_eagerloads(False)` so that we do not try to
75+
# load attached resources (e.g. standardattributes) as this breaks the
76+
# `LOCK IN SHARE MODE` statement.
77+
num_reg = context.session.query(
78+
cls).filter_by(**filters).enable_eagerloads(
79+
False).with_for_update(read=True).first()
80+
if num_reg is None:
5581
raise exception
5682

5783

zuul.d/base.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,7 @@
151151

152152
- job:
153153
name: neutron-linuxbridge-tempest-plugin-scenario-nftables
154-
parent: neutron-tempest-plugin-scenario-linuxbridge
154+
parent: neutron-tempest-plugin-scenario-linuxbridge-yoga
155155
pre-run: playbooks/install_nftables.yaml
156156
vars:
157157
devstack_local_conf:
@@ -162,7 +162,7 @@
162162

163163
- job:
164164
name: neutron-ovs-tempest-plugin-scenario-iptables_hybrid-nftables
165-
parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
165+
parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga
166166
pre-run: playbooks/install_nftables.yaml
167167
vars:
168168
devstack_local_conf:

0 commit comments

Comments
 (0)