Skip to content

Commit b74c29f

Browse files
authored
Move towards a dedicated Duration class (#19223)
We have various constants to try to avoid mistyping durations, e.g. `ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND`; however, this can get a little verbose and doesn't help with typing. Instead, let's move towards a dedicated `Duration` class (basically a [`timedelta`](https://docs.python.org/3/library/datetime.html#timedelta-objects) with helper methods). This PR introduces the new type and converts all usages of the existing constants to it. Future PRs may work to move the clock methods to also use it (e.g. `call_later` and `looping_call`). Reviewable commit-by-commit.
1 parent 2741ead commit b74c29f

File tree

15 files changed

+95
-93
lines changed

15 files changed

+95
-93
lines changed

changelog.d/19223.misc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Move towards using a dedicated `Duration` type.

synapse/app/phone_stats_home.py

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -30,24 +30,20 @@
3030

3131
from synapse.metrics import SERVER_NAME_LABEL
3232
from synapse.types import JsonDict
33-
from synapse.util.constants import (
34-
MILLISECONDS_PER_SECOND,
35-
ONE_HOUR_SECONDS,
36-
ONE_MINUTE_SECONDS,
37-
)
33+
from synapse.util.duration import Duration
3834

3935
if TYPE_CHECKING:
4036
from synapse.server import HomeServer
4137

4238
logger = logging.getLogger("synapse.app.homeserver")
4339

44-
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
40+
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME = Duration(minutes=5)
4541
"""
4642
We wait 5 minutes to send the first set of stats as the server can be quite busy the
4743
first few minutes
4844
"""
4945

50-
PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS
46+
PHONE_HOME_INTERVAL = Duration(hours=3)
5147
"""
5248
Phone home stats are sent every 3 hours
5349
"""
@@ -222,13 +218,13 @@ def performance_stats_init() -> None:
222218
# table will decrease
223219
clock.looping_call(
224220
hs.get_datastores().main.generate_user_daily_visits,
225-
5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND,
221+
Duration(minutes=5).as_millis(),
226222
)
227223

228224
# monthly active user limiting functionality
229225
clock.looping_call(
230226
hs.get_datastores().main.reap_monthly_active_users,
231-
ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND,
227+
Duration(hours=1).as_millis(),
232228
)
233229
hs.get_datastores().main.reap_monthly_active_users()
234230

@@ -274,7 +270,7 @@ async def _generate_monthly_active_users() -> None:
274270
logger.info("Scheduling stats reporting for 3 hour intervals")
275271
clock.looping_call(
276272
phone_stats_home,
277-
PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND,
273+
PHONE_HOME_INTERVAL.as_millis(),
278274
hs,
279275
stats,
280276
)
@@ -289,7 +285,7 @@ async def _generate_monthly_active_users() -> None:
289285
# We wait 5 minutes to send the first set of stats as the server can
290286
# be quite busy the first few minutes
291287
clock.call_later(
292-
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS,
288+
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME.as_secs(),
293289
phone_stats_home,
294290
hs,
295291
stats,

synapse/handlers/sliding_sync/room_lists.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
from synapse.events import StrippedStateEvent
4040
from synapse.events.utils import parse_stripped_state_event
4141
from synapse.logging.opentracing import start_active_span, trace
42-
from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS_MS
42+
from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS
4343
from synapse.storage.databases.main.state import (
4444
ROOM_UNKNOWN_SENTINEL,
4545
Sentinel as StateSentinel,
@@ -70,7 +70,7 @@
7070
)
7171
from synapse.types.state import StateFilter
7272
from synapse.util import MutableOverlayMapping
73-
from synapse.util.constants import MILLISECONDS_PER_SECOND, ONE_HOUR_SECONDS
73+
from synapse.util.duration import Duration
7474
from synapse.util.sentinel import Sentinel
7575

7676
if TYPE_CHECKING:
@@ -85,7 +85,7 @@
8585
# tight loops with clients that request lots of data at once.
8686
#
8787
# c.f. `NUM_ROOMS_THRESHOLD`. These values are somewhat arbitrarily picked.
88-
MINIMUM_NOT_USED_AGE_EXPIRY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND
88+
MINIMUM_NOT_USED_AGE_EXPIRY = Duration(hours=1)
8989

9090
# How many rooms with updates we allow before we consider the connection expired
9191
# due to too many rooms to send.
@@ -99,7 +99,7 @@
9999
# connection even if it is actively being used (and we're just not updating the
100100
# DB frequently enough). We arbitrarily double the update interval to give some
101101
# wiggle room.
102-
assert 2 * UPDATE_INTERVAL_LAST_USED_TS_MS < MINIMUM_NOT_USED_AGE_EXPIRY_MS
102+
assert 2 * UPDATE_INTERVAL_LAST_USED_TS < MINIMUM_NOT_USED_AGE_EXPIRY
103103

104104
# Helper definition for the types that we might return. We do this to avoid
105105
# copying data between types (which can be expensive for many rooms).
@@ -913,7 +913,7 @@ async def _filter_relevant_rooms_to_send(
913913
if (
914914
last_sync_ts is not None
915915
and (self._clock.time_msec() - last_sync_ts)
916-
> MINIMUM_NOT_USED_AGE_EXPIRY_MS
916+
> MINIMUM_NOT_USED_AGE_EXPIRY.as_millis()
917917
):
918918
raise SlidingSyncUnknownPosition()
919919

synapse/handlers/worker_lock.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
from synapse.storage.databases.main.lock import Lock, LockStore
4040
from synapse.util.async_helpers import timeout_deferred
4141
from synapse.util.clock import Clock
42-
from synapse.util.constants import ONE_MINUTE_SECONDS
42+
from synapse.util.duration import Duration
4343

4444
if TYPE_CHECKING:
4545
from synapse.logging.opentracing import opentracing
@@ -276,7 +276,7 @@ async def __aexit__(
276276
def _get_next_retry_interval(self) -> float:
277277
next = self._retry_interval
278278
self._retry_interval = max(5, next * 2)
279-
if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations
279+
if self._retry_interval > Duration(minutes=10).as_secs(): # >7 iterations
280280
logger.warning(
281281
"Lock timeout is getting excessive: %ss. There may be a deadlock.",
282282
self._retry_interval,
@@ -363,7 +363,7 @@ async def __aexit__(
363363
def _get_next_retry_interval(self) -> float:
364364
next = self._retry_interval
365365
self._retry_interval = max(5, next * 2)
366-
if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations
366+
if self._retry_interval > Duration(minutes=10).as_secs(): # >7 iterations
367367
logger.warning(
368368
"Lock timeout is getting excessive: %ss. There may be a deadlock.",
369369
self._retry_interval,

synapse/rest/client/transactions.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,13 +34,14 @@
3434
from synapse.logging.context import make_deferred_yieldable, run_in_background
3535
from synapse.types import JsonDict, Requester
3636
from synapse.util.async_helpers import ObservableDeferred
37+
from synapse.util.duration import Duration
3738

3839
if TYPE_CHECKING:
3940
from synapse.server import HomeServer
4041

4142
logger = logging.getLogger(__name__)
4243

43-
CLEANUP_PERIOD_MS = 1000 * 60 * 30 # 30 mins
44+
CLEANUP_PERIOD = Duration(minutes=30)
4445

4546

4647
P = ParamSpec("P")
@@ -56,7 +57,7 @@ def __init__(self, hs: "HomeServer"):
5657
] = {}
5758
# Try to clean entries every 30 mins. This means entries will exist
5859
# for at *LEAST* 30 mins, and at *MOST* 60 mins.
59-
self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)
60+
self.clock.looping_call(self._cleanup, CLEANUP_PERIOD.as_millis())
6061

6162
def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable:
6263
"""A helper function which returns a transaction key that can be used
@@ -145,5 +146,5 @@ def _cleanup(self) -> None:
145146
now = self.clock.time_msec()
146147
for key in list(self.transactions):
147148
ts = self.transactions[key][1]
148-
if now > (ts + CLEANUP_PERIOD_MS): # after cleanup period
149+
if now > (ts + CLEANUP_PERIOD.as_millis()): # after cleanup period
149150
del self.transactions[key]

synapse/storage/databases/main/deviceinbox.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -48,9 +48,9 @@
4848
)
4949
from synapse.storage.util.id_generators import MultiWriterIdGenerator
5050
from synapse.types import JsonDict, StrCollection
51-
from synapse.util import Duration
5251
from synapse.util.caches.expiringcache import ExpiringCache
5352
from synapse.util.caches.stream_change_cache import StreamChangeCache
53+
from synapse.util.duration import Duration
5454
from synapse.util.iterutils import batch_iter
5555
from synapse.util.json import json_encoder
5656
from synapse.util.stringutils import parse_and_validate_server_name
@@ -62,10 +62,10 @@
6262

6363

6464
# How long to keep messages in the device federation inbox before deleting them.
65-
DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS = 7 * Duration.DAY_MS
65+
DEVICE_FEDERATION_INBOX_CLEANUP_DELAY = Duration(days=7)
6666

6767
# How often to run the task to clean up old device_federation_inbox rows.
68-
DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS = 5 * Duration.MINUTE_MS
68+
DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL = Duration(minutes=5)
6969

7070
# Update name for the device federation inbox received timestamp index.
7171
DEVICE_FEDERATION_INBOX_RECEIVED_INDEX_UPDATE = (
@@ -152,7 +152,7 @@ def __init__(
152152
if hs.config.worker.run_background_tasks:
153153
self.clock.looping_call(
154154
run_as_background_process,
155-
DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS,
155+
DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL.as_millis(),
156156
"_delete_old_federation_inbox_rows",
157157
self.server_name,
158158
self._delete_old_federation_inbox_rows,
@@ -996,9 +996,10 @@ async def _delete_old_federation_inbox_rows(self, batch_size: int = 1000) -> Non
996996

997997
def _delete_old_federation_inbox_rows_txn(txn: LoggingTransaction) -> bool:
998998
# We delete at most 100 rows that are older than
999-
# DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
999+
# DEVICE_FEDERATION_INBOX_CLEANUP_DELAY
10001000
delete_before_ts = (
1001-
self.clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
1001+
self.clock.time_msec()
1002+
- DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_millis()
10021003
)
10031004
sql = """
10041005
WITH to_delete AS (

synapse/storage/databases/main/sliding_sync.py

Lines changed: 10 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -37,12 +37,7 @@
3737
RoomSyncConfig,
3838
)
3939
from synapse.util.caches.descriptors import cached
40-
from synapse.util.constants import (
41-
MILLISECONDS_PER_SECOND,
42-
ONE_DAY_SECONDS,
43-
ONE_HOUR_SECONDS,
44-
ONE_MINUTE_SECONDS,
45-
)
40+
from synapse.util.duration import Duration
4641
from synapse.util.json import json_encoder
4742

4843
if TYPE_CHECKING:
@@ -57,14 +52,14 @@
5752
# position. We don't want to update it on every use to avoid excessive
5853
# writes, but we want it to be reasonably up-to-date to help with
5954
# cleaning up old connection positions.
60-
UPDATE_INTERVAL_LAST_USED_TS_MS = 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND
55+
UPDATE_INTERVAL_LAST_USED_TS = Duration(minutes=5)
6156

6257
# Time in milliseconds the connection hasn't been used before we consider it
6358
# expired and delete it.
64-
CONNECTION_EXPIRY_MS = 7 * ONE_DAY_SECONDS * MILLISECONDS_PER_SECOND
59+
CONNECTION_EXPIRY = Duration(days=7)
6560

6661
# How often we run the background process to delete old sliding sync connections.
67-
CONNECTION_EXPIRY_FREQUENCY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND
62+
CONNECTION_EXPIRY_FREQUENCY = Duration(hours=1)
6863

6964

7065
class SlidingSyncStore(SQLBaseStore):
@@ -101,7 +96,7 @@ def __init__(
10196
if self.hs.config.worker.run_background_tasks:
10297
self.clock.looping_call(
10398
self.delete_old_sliding_sync_connections,
104-
CONNECTION_EXPIRY_FREQUENCY_MS,
99+
CONNECTION_EXPIRY_FREQUENCY.as_millis(),
105100
)
106101

107102
async def get_latest_bump_stamp_for_room(
@@ -430,7 +425,10 @@ def _get_and_clear_connection_positions_txn(
430425
# Update the `last_used_ts` if it's due to be updated. We don't update
431426
# every time to avoid excessive writes.
432427
now = self.clock.time_msec()
433-
if last_used_ts is None or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS_MS:
428+
if (
429+
last_used_ts is None
430+
or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS.as_millis()
431+
):
434432
self.db_pool.simple_update_txn(
435433
txn,
436434
table="sliding_sync_connections",
@@ -532,7 +530,7 @@ def _get_and_clear_connection_positions_txn(
532530
@wrap_as_background_process("delete_old_sliding_sync_connections")
533531
async def delete_old_sliding_sync_connections(self) -> None:
534532
"""Delete sliding sync connections that have not been used for a long time."""
535-
cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY_MS
533+
cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY.as_millis()
536534

537535
def delete_old_sliding_sync_connections_txn(txn: LoggingTransaction) -> None:
538536
sql = """

synapse/util/__init__.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -41,15 +41,6 @@
4141
logger = logging.getLogger(__name__)
4242

4343

44-
class Duration:
45-
"""Helper class that holds constants for common time durations in
46-
milliseconds."""
47-
48-
MINUTE_MS = 60 * 1000
49-
HOUR_MS = 60 * MINUTE_MS
50-
DAY_MS = 24 * HOUR_MS
51-
52-
5344
def unwrapFirstError(failure: Failure) -> Failure:
5445
# Deprecated: you probably just want to catch defer.FirstError and reraise
5546
# the subFailure's value, which will do a better job of preserving stacktraces.

synapse/util/background_queue.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
from twisted.internet import defer
2828

2929
from synapse.util.async_helpers import DeferredEvent
30-
from synapse.util.constants import MILLISECONDS_PER_SECOND
30+
from synapse.util.duration import Duration
3131

3232
if TYPE_CHECKING:
3333
from synapse.server import HomeServer
@@ -67,7 +67,7 @@ def __init__(
6767
self._hs = hs
6868
self._name = name
6969
self._callback = callback
70-
self._timeout_ms = timeout_ms
70+
self._timeout_ms = Duration(milliseconds=timeout_ms)
7171

7272
# The queue of items to process.
7373
self._queue: collections.deque[T] = collections.deque()
@@ -125,7 +125,7 @@ async def _process_queue(self) -> None:
125125
# just loop round, clear the event, recheck the queue, and then
126126
# wait here again.
127127
new_data = await self._wakeup_event.wait(
128-
timeout_seconds=self._timeout_ms / MILLISECONDS_PER_SECOND
128+
timeout_seconds=self._timeout_ms.as_secs()
129129
)
130130
if not new_data:
131131
# Timed out waiting for new data, so exit the loop

synapse/util/constants.py

Lines changed: 0 additions & 23 deletions
This file was deleted.

0 commit comments

Comments
 (0)