|
9 | 9 |
|
10 | 10 | from celery import current_app, schedules |
11 | 11 | from celery.beat import ScheduleEntry, Scheduler |
| 12 | +from celery.signals import beat_init |
12 | 13 | from celery.utils.log import get_logger |
13 | 14 | from sqlalchemy import select |
14 | 15 | from sqlalchemy.exc import DatabaseError, InterfaceError |
|
28 | 29 | # 此计划程序必须比常规的 5 分钟更频繁地唤醒,因为它需要考虑对计划的外部更改 |
29 | 30 | DEFAULT_MAX_INTERVAL = 5 # seconds |
30 | 31 |
|
# How long (seconds) the distributed beat lock is held before it expires,
# so that only one beat instance runs the schedule at a time.
DEFAULT_MAX_LOCK = 300  # seconds

# Copied from:
# https://github.com/andymccurdy/redis-py/blob/master/redis/lock.py#L33
# Changes:
# The second line from the bottom: The original Lua script intends
# to extend time to (lock remaining time + additional time); while
# the script here extends time to an expected expiration time.
# KEYS[1] - lock name
# ARGV[1] - token
# ARGV[2] - additional milliseconds
# return 1 if the lock's time was extended, otherwise 0
LUA_EXTEND_TO_SCRIPT = """
    local token = redis.call('get', KEYS[1])
    if not token or token ~= ARGV[1] then
        return 0
    end
    local expiration = redis.call('pttl', KEYS[1])
    if not expiration then
        expiration = 0
    end
    if expiration < 0 then
        return 0
    end
    redis.call('pexpire', KEYS[1], ARGV[2])
    return 1
"""

31 | 61 | logger = get_logger('fba.schedulers') |
32 | 62 |
|
33 | 63 |
|
@@ -188,21 +218,12 @@ async def to_model_schedule(name: str, task: str, schedule: schedules.schedule | |
188 | 218 | if not obj: |
189 | 219 | obj = TaskScheduler(**CreateTaskSchedulerParam(task=task, **spec).model_dump()) |
190 | 220 | elif isinstance(schedule, schedules.crontab): |
191 | | - crontab_minute = schedule._orig_minute if crontab_verify('m', schedule._orig_minute, False) else '*' |
192 | | - crontab_hour = schedule._orig_hour if crontab_verify('h', schedule._orig_hour, False) else '*' |
193 | | - crontab_day_of_week = ( |
194 | | - schedule._orig_day_of_week if crontab_verify('dom', schedule._orig_day_of_week, False) else '*' |
195 | | - ) |
196 | | - crontab_day_of_month = ( |
197 | | - schedule._orig_day_of_month if crontab_verify('dom', schedule._orig_day_of_month, False) else '*' |
198 | | - ) |
199 | | - crontab_month_of_year = ( |
200 | | - schedule._orig_month_of_year if crontab_verify('moy', schedule._orig_month_of_year, False) else '*' |
201 | | - ) |
| 221 | + crontab = f'{schedule._orig_minute} {schedule._orig_hour} {schedule._orig_day_of_week} {schedule._orig_day_of_month} {schedule._orig_month_of_year}' # noqa: E501 |
| 222 | + crontab_verify(crontab) |
202 | 223 | spec = { |
203 | 224 | 'name': name, |
204 | 225 | 'type': TaskSchedulerType.CRONTAB.value, |
205 | | - 'crontab': f'{crontab_minute} {crontab_hour} {crontab_day_of_week} {crontab_day_of_month} {crontab_month_of_year}', # noqa: E501 |
| 226 | + 'crontab': crontab, |
206 | 227 | } |
207 | 228 | stmt = select(TaskScheduler).filter_by(**spec) |
208 | 229 | query = await db.execute(stmt) |
@@ -269,13 +290,18 @@ def _unpack_options( |
269 | 290 |
|
270 | 291 |
|
271 | 292 | class DatabaseScheduler(Scheduler): |
| 293 | + """数据库调度程序""" |
| 294 | + |
272 | 295 | Entry = ModelEntry |
273 | 296 |
|
274 | 297 | _schedule = None |
275 | 298 | _last_update = None |
276 | 299 | _initial_read = True |
277 | 300 | _heap_invalidated = False |
278 | 301 |
|
| 302 | + lock = None |
| 303 | + lock_key = f'{settings.CELERY_REDIS_PREFIX}:beat_lock' |
| 304 | + |
279 | 305 | def __init__(self, *args, **kwargs): |
280 | 306 | self.app = kwargs['app'] |
281 | 307 | self._dirty = set() |
@@ -324,6 +350,16 @@ def reserve(self, entry): |
324 | 350 | self._dirty.add(new_entry.name) |
325 | 351 | return new_entry |
326 | 352 |
|
| 353 | + def close(self): |
| 354 | + """重写父函数""" |
| 355 | + if self.lock: |
| 356 | + logger.info('beat: Releasing lock') |
| 357 | + if run_await(self.lock.owned)(): |
| 358 | + run_await(self.lock.release)() |
| 359 | + self.lock = None |
| 360 | + |
| 361 | + self.sync() |
| 362 | + |
327 | 363 | def sync(self): |
328 | 364 | """重写父函数""" |
329 | 365 | _tried = set() |
@@ -410,3 +446,29 @@ def schedule(self) -> dict[str, ModelEntry]: |
410 | 446 |
|
411 | 447 | # logger.debug(self._schedule) |
412 | 448 | return self._schedule |
| 449 | + |
| 450 | + |
@beat_init.connect
def acquire_distributed_beat_lock(sender=None, *args, **kwargs):
    """Acquire the distributed beat lock when celery beat starts.

    Blocks until the Redis lock is obtained, guaranteeing that only a single
    beat instance drives the schedule at any time.

    :param sender: the beat service emitting the ``beat_init`` signal
    :return:
    """
    scheduler = sender.scheduler
    if not scheduler.lock_key:
        return

    logger.debug('beat: Acquiring lock...')
    beat_lock = redis_client.lock(
        scheduler.lock_key,
        timeout=DEFAULT_MAX_LOCK,
        sleep=scheduler.max_interval,
    )
    # Swap in our Lua "extend" script: unlike redis-py's default (which adds
    # time on top of the remaining TTL), ours sets an absolute new expiration.
    beat_lock.lua_extend = redis_client.register_script(LUA_EXTEND_TO_SCRIPT)
    run_await(beat_lock.acquire)()
    logger.info('beat: Acquired lock')
    scheduler.lock = beat_lock
0 commit comments