diff --git a/docs/commands.md b/docs/commands.md
index 7020cf9..8cb61f7 100644
--- a/docs/commands.md
+++ b/docs/commands.md
@@ -27,8 +27,6 @@ options:
--max-jobs MAX_JOBS Maximum number of jobs to execute before terminating worker
--fork-job-execution FORK_JOB_EXECUTION
Fork job execution to another process
- --job-class JOB_CLASS
- Jobs class to use
--sentry-dsn SENTRY_DSN
Sentry DSN to use
--sentry-debug Enable Sentry debug mode
diff --git a/poetry.lock b/poetry.lock
index db94958..a551d02 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]]
name = "asgiref"
@@ -6,7 +6,6 @@ version = "3.8.1"
description = "ASGI specs, helper code, and adapters"
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
{file = "asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47"},
{file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"},
@@ -24,12 +23,10 @@ version = "5.0.1"
description = "Timeout context manager for asyncio programs"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"},
{file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"},
]
-markers = {main = "python_version < \"3.11.3\"", dev = "python_full_version < \"3.11.3\""}
[[package]]
name = "backports-tarfile"
@@ -37,8 +34,6 @@ version = "1.2.0"
description = "Backport of CPython tarfile module"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
-markers = "python_version < \"3.12\""
files = [
{file = "backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34"},
{file = "backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991"},
@@ -54,7 +49,6 @@ version = "24.10.0"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"},
{file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"},
@@ -101,7 +95,6 @@ version = "1.2.2.post1"
description = "A simple, correct Python build frontend"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5"},
{file = "build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7"},
@@ -127,7 +120,6 @@ version = "0.14.2"
description = "httplib2 caching for requests"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "cachecontrol-0.14.2-py3-none-any.whl", hash = "sha256:ebad2091bf12d0d200dfc2464330db638c5deb41d546f6d7aca079e87290f3b0"},
{file = "cachecontrol-0.14.2.tar.gz", hash = "sha256:7d47d19f866409b98ff6025b6a0fca8e4c791fb31abbd95f622093894ce903a2"},
@@ -149,7 +141,6 @@ version = "2025.1.31"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
-groups = ["dev"]
files = [
{file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"},
{file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"},
@@ -161,8 +152,6 @@ version = "1.17.1"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
-markers = "sys_platform == \"linux\" and platform_python_implementation != \"PyPy\" or sys_platform == \"darwin\""
files = [
{file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
{file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
@@ -242,7 +231,6 @@ version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
@@ -344,7 +332,6 @@ version = "2.1.0"
description = "Cleo allows you to create beautiful and testable command-line interfaces."
optional = false
python-versions = ">=3.7,<4.0"
-groups = ["dev"]
files = [
{file = "cleo-2.1.0-py3-none-any.whl", hash = "sha256:4a31bd4dd45695a64ee3c4758f583f134267c2bc518d8ae9a29cf237d009b07e"},
{file = "cleo-2.1.0.tar.gz", hash = "sha256:0b2c880b5d13660a7ea651001fb4acb527696c01f15c9ee650f377aa543fd523"},
@@ -360,7 +347,6 @@ version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
-groups = ["main", "dev"]
files = [
{file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
{file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
@@ -375,12 +361,10 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
-groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
-markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or os_name == \"nt\""}
[[package]]
name = "coverage"
@@ -388,7 +372,6 @@ version = "7.6.12"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"},
{file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"},
@@ -464,7 +447,6 @@ version = "0.4.1"
description = "Manage Python errors with ease"
optional = false
python-versions = ">=3.7,<4.0"
-groups = ["dev"]
files = [
{file = "crashtest-0.4.1-py3-none-any.whl", hash = "sha256:8d23eac5fa660409f57472e3851dab7ac18aba459a8d19cbbba86d3d5aecd2a5"},
{file = "crashtest-0.4.1.tar.gz", hash = "sha256:80d7b1f316ebfbd429f648076d6275c877ba30ba48979de4191714a75266f0ce"},
@@ -476,7 +458,6 @@ version = "6.0.0"
description = "croniter provides iteration for datetime object with cron like format"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.6"
-groups = ["main"]
files = [
{file = "croniter-6.0.0-py2.py3-none-any.whl", hash = "sha256:2f878c3856f17896979b2a4379ba1f09c83e374931ea15cc835c5dd2eee9b368"},
{file = "croniter-6.0.0.tar.gz", hash = "sha256:37c504b313956114a983ece2c2b07790b1f1094fe9d81cc94739214748255577"},
@@ -492,8 +473,6 @@ version = "44.0.1"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.7"
-groups = ["dev"]
-markers = "sys_platform == \"linux\""
files = [
{file = "cryptography-44.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf688f615c29bfe9dfc44312ca470989279f0e94bb9f631f85e3459af8efc009"},
{file = "cryptography-44.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd7c7e2d71d908dc0f8d2027e1604102140d84b155e658c20e8ad1304317691f"},
@@ -547,7 +526,6 @@ version = "0.3.9"
description = "Distribution utilities"
optional = false
python-versions = "*"
-groups = ["dev"]
files = [
{file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"},
{file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"},
@@ -559,7 +537,6 @@ version = "5.1.6"
description = "A high-level Python web framework that encourages rapid development and clean, pragmatic design."
optional = false
python-versions = ">=3.10"
-groups = ["main"]
files = [
{file = "Django-5.1.6-py3-none-any.whl", hash = "sha256:8d203400bc2952fbfb287c2bbda630297d654920c72a73cc82a9ad7926feaad5"},
{file = "Django-5.1.6.tar.gz", hash = "sha256:1e39eafdd1b185e761d9fab7a9f0b9fa00af1b37b25ad980a8aa0dac13535690"},
@@ -580,7 +557,6 @@ version = "0.22.7"
description = "Python Git Library"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "dulwich-0.22.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01e484d44014fef78cdef3b3adc34564808b4677497a57a0950c90a1d6349be3"},
{file = "dulwich-0.22.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb258c62d7fb4cfe03b3fba09f702ebb84a924f2f004833435e32c93fe8a7f13"},
@@ -642,7 +618,6 @@ version = "2.27.0"
description = "Python implementation of redis API, can be used for testing purposes."
optional = false
python-versions = "<4.0,>=3.7"
-groups = ["dev"]
files = [
{file = "fakeredis-2.27.0-py3-none-any.whl", hash = "sha256:f4b6e0fa4193acbf00d81dac71ff5cc34fe7d7c12f1560b036f98578a103d5c3"},
{file = "fakeredis-2.27.0.tar.gz", hash = "sha256:7b7584ec104392592297f46864a82cb7339a23e254ee885bf5ae07cfc64fbce7"},
@@ -667,7 +642,6 @@ version = "2.21.1"
description = "Fastest Python implementation of JSON schema"
optional = false
python-versions = "*"
-groups = ["dev"]
files = [
{file = "fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667"},
{file = "fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4"},
@@ -682,7 +656,6 @@ version = "3.17.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"},
{file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"},
@@ -699,7 +672,6 @@ version = "7.1.1"
description = "the modular source code checker: pep8 pyflakes and co"
optional = false
python-versions = ">=3.8.1"
-groups = ["dev"]
files = [
{file = "flake8-7.1.1-py2.py3-none-any.whl", hash = "sha256:597477df7860daa5aa0fdd84bf5208a043ab96b8e96ab708770ae0364dd03213"},
{file = "flake8-7.1.1.tar.gz", hash = "sha256:049d058491e228e03e67b390f311bbf88fce2dbaa8fa673e7aea87b7198b8d38"},
@@ -716,7 +688,6 @@ version = "1.2.3"
description = "Flake8 plug-in loading the configuration from pyproject.toml"
optional = false
python-versions = ">= 3.6"
-groups = ["dev"]
files = [
{file = "flake8_pyproject-1.2.3-py3-none-any.whl", hash = "sha256:6249fe53545205af5e76837644dc80b4c10037e73a0e5db87ff562d75fb5bd4a"},
]
@@ -734,7 +705,6 @@ version = "1.5.1"
description = "Let your Python tests travel through time"
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
files = [
{file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"},
{file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"},
@@ -749,7 +719,6 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
-groups = ["dev"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -764,8 +733,6 @@ version = "8.6.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
-markers = "python_version < \"3.12\""
files = [
{file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
{file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
@@ -789,7 +756,6 @@ version = "0.7.0"
description = "A library for installing Python wheels."
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
files = [
{file = "installer-0.7.0-py3-none-any.whl", hash = "sha256:05d1933f0a5ba7d8d6296bb6d5018e7c94fa473ceb10cf198a92ccea19c27b53"},
{file = "installer-0.7.0.tar.gz", hash = "sha256:a26d3e3116289bb08216e0d0f7d925fcef0b0194eedfa0c944bcaaa106c4b631"},
@@ -801,7 +767,6 @@ version = "3.4.0"
description = "Utility functions for Python class constructs"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "jaraco.classes-3.4.0-py3-none-any.whl", hash = "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790"},
{file = "jaraco.classes-3.4.0.tar.gz", hash = "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd"},
@@ -820,7 +785,6 @@ version = "6.0.1"
description = "Useful decorators and context managers"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "jaraco.context-6.0.1-py3-none-any.whl", hash = "sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4"},
{file = "jaraco_context-6.0.1.tar.gz", hash = "sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3"},
@@ -839,7 +803,6 @@ version = "4.1.0"
description = "Functools like those found in stdlib"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "jaraco.functools-4.1.0-py3-none-any.whl", hash = "sha256:ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649"},
{file = "jaraco_functools-4.1.0.tar.gz", hash = "sha256:70f7e0e2ae076498e212562325e805204fc092d7b4c17e0e86c959e249701a9d"},
@@ -862,8 +825,6 @@ version = "0.8.0"
description = "Low-level, pure Python DBus protocol wrapper."
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
-markers = "sys_platform == \"linux\""
files = [
{file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"},
{file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"},
@@ -879,7 +840,6 @@ version = "25.6.0"
description = "Store and access your passwords safely."
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "keyring-25.6.0-py3-none-any.whl", hash = "sha256:552a3f7af126ece7ed5c89753650eec89c7eaae8617d0aa4d9ad2b75111266bd"},
{file = "keyring-25.6.0.tar.gz", hash = "sha256:0b39998aa941431eb3d9b0d4b2460bc773b9df6fed7621c2dfb291a7e0187a66"},
@@ -909,7 +869,6 @@ version = "2.4"
description = "Python wrapper around Lua and LuaJIT"
optional = false
python-versions = "*"
-groups = ["dev"]
files = [
{file = "lupa-2.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:518822e047b2c65146cf09efb287f28c2eb3ced38bcc661f881f33bcd9e2ba1f"},
{file = "lupa-2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:15ce18c8b7642dd5b8f491c6e19fea6079f24f52e543c698622e5eb80b17b952"},
@@ -1014,7 +973,6 @@ version = "0.7.0"
description = "McCabe checker, plugin for flake8"
optional = false
python-versions = ">=3.6"
-groups = ["dev"]
files = [
{file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
@@ -1026,7 +984,6 @@ version = "10.6.0"
description = "More routines for operating on iterables, beyond itertools"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "more-itertools-10.6.0.tar.gz", hash = "sha256:2cd7fad1009c31cc9fb6a035108509e6547547a7a738374f10bd49a09eb3ee3b"},
{file = "more_itertools-10.6.0-py3-none-any.whl", hash = "sha256:6eb054cb4b6db1473f6e15fcc676a08e4732548acd47c708f0e179c2c7c01e89"},
@@ -1038,7 +995,6 @@ version = "1.1.0"
description = "MessagePack serializer"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"},
{file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"},
@@ -1112,7 +1068,6 @@ version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.5"
-groups = ["dev"]
files = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
@@ -1124,7 +1079,6 @@ version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
@@ -1136,7 +1090,6 @@ version = "0.12.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
@@ -1148,7 +1101,6 @@ version = "1.12.0"
description = "Query metadata from sdists / bdists / installed packages."
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "pkginfo-1.12.0-py3-none-any.whl", hash = "sha256:dcd589c9be4da8973eceffa247733c144812759aa67eaf4bbf97016a02f39088"},
{file = "pkginfo-1.12.0.tar.gz", hash = "sha256:8ad91a0445a036782b9366ef8b8c2c50291f83a553478ba8580c73d3215700cf"},
@@ -1163,7 +1115,6 @@ version = "4.3.6"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
{file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
@@ -1180,7 +1131,6 @@ version = "2.0.1"
description = "Python dependency management and packaging made easy."
optional = false
python-versions = "<4.0,>=3.9"
-groups = ["dev"]
files = [
{file = "poetry-2.0.1-py3-none-any.whl", hash = "sha256:eb780a8acbd6eec4bc95e8ba104058c5129ea5a44115fc9b1fc0a2235412734d"},
{file = "poetry-2.0.1.tar.gz", hash = "sha256:a2987c3162f6ded6db890701a6fc657d2cfcc702e9421ef4c345211c8bffc5d5"},
@@ -1214,7 +1164,6 @@ version = "2.0.1"
description = "Poetry PEP 517 Build Backend"
optional = false
python-versions = "<4.0,>=3.9"
-groups = ["dev"]
files = [
{file = "poetry_core-2.0.1-py3-none-any.whl", hash = "sha256:a3c7009536522cda4eb0fb3805c9dc935b5537f8727dd01efb9c15e51a17552b"},
{file = "poetry_core-2.0.1.tar.gz", hash = "sha256:10177c2772469d9032a49f0d8707af761b1c597cea3b4fb31546e5cd436eb157"},
@@ -1226,7 +1175,6 @@ version = "2.12.1"
description = "Python style guide checker"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3"},
{file = "pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521"},
@@ -1238,8 +1186,6 @@ version = "2.22"
description = "C parser in Python"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
-markers = "sys_platform == \"linux\" and platform_python_implementation != \"PyPy\" or sys_platform == \"darwin\""
files = [
{file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
{file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
@@ -1251,7 +1197,6 @@ version = "3.2.0"
description = "passive checker of Python programs"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"},
{file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"},
@@ -1263,7 +1208,6 @@ version = "1.2.0"
description = "Wrappers to call pyproject.toml-based build backend hooks."
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
files = [
{file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"},
{file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"},
@@ -1275,7 +1219,6 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
-groups = ["main", "dev"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1290,7 +1233,6 @@ version = "2025.1"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
-groups = ["main"]
files = [
{file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"},
{file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"},
@@ -1302,8 +1244,6 @@ version = "0.2.3"
description = "A (partial) reimplementation of pywin32 using ctypes/cffi"
optional = false
python-versions = ">=3.6"
-groups = ["dev"]
-markers = "sys_platform == \"win32\""
files = [
{file = "pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755"},
{file = "pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8"},
@@ -1315,7 +1255,6 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1378,7 +1317,6 @@ version = "3.12.1"
description = "rapid fuzzy string matching"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "rapidfuzz-3.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dbb7ea2fd786e6d66f225ef6eef1728832314f47e82fee877cb2a793ebda9579"},
{file = "rapidfuzz-3.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ae41361de05762c1eaa3955e5355de7c4c6f30d1ef1ea23d29bf738a35809ab"},
@@ -1479,7 +1417,6 @@ version = "5.2.1"
description = "Python client for Redis database and key-value store"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4"},
{file = "redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f"},
@@ -1498,7 +1435,6 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1520,7 +1456,6 @@ version = "1.0.0"
description = "A utility belt for advanced users of python-requests"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-groups = ["dev"]
files = [
{file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
{file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
@@ -1529,30 +1464,12 @@ files = [
[package.dependencies]
requests = ">=2.0.1,<3.0.0"
-[[package]]
-name = "rq"
-version = "1.16.2"
-description = "RQ is a simple, lightweight, library for creating background jobs, and processing them."
-optional = false
-python-versions = ">=3.7"
-groups = ["main"]
-files = [
- {file = "rq-1.16.2-py3-none-any.whl", hash = "sha256:52e619f6cb469b00e04da74305045d244b75fecb2ecaa4f26422add57d3c5f09"},
- {file = "rq-1.16.2.tar.gz", hash = "sha256:5c5b9ad5fbaf792b8fada25cc7627f4d206a9a4455aced371d4f501cc3f13b34"},
-]
-
-[package.dependencies]
-click = ">=5"
-redis = ">=3.5"
-
[[package]]
name = "secretstorage"
version = "3.3.3"
description = "Python bindings to FreeDesktop.org Secret Service API"
optional = false
python-versions = ">=3.6"
-groups = ["dev"]
-markers = "sys_platform == \"linux\""
files = [
{file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"},
{file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"},
@@ -1562,13 +1479,67 @@ files = [
cryptography = ">=2.0"
jeepney = ">=0.6"
+[[package]]
+name = "sentry-sdk"
+version = "2.21.0"
+description = "Python client for Sentry (https://sentry.io)"
+optional = true
+python-versions = ">=3.6"
+files = [
+ {file = "sentry_sdk-2.21.0-py2.py3-none-any.whl", hash = "sha256:7623cfa9e2c8150948a81ca253b8e2bfe4ce0b96ab12f8cd78e3ac9c490fd92f"},
+ {file = "sentry_sdk-2.21.0.tar.gz", hash = "sha256:a6d38e0fb35edda191acf80b188ec713c863aaa5ad8d5798decb8671d02077b6"},
+]
+
+[package.dependencies]
+certifi = "*"
+urllib3 = ">=1.26.11"
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.5)"]
+anthropic = ["anthropic (>=0.16)"]
+arq = ["arq (>=0.23)"]
+asyncpg = ["asyncpg (>=0.23)"]
+beam = ["apache-beam (>=2.12)"]
+bottle = ["bottle (>=0.12.13)"]
+celery = ["celery (>=3)"]
+celery-redbeat = ["celery-redbeat (>=2)"]
+chalice = ["chalice (>=1.16.0)"]
+clickhouse-driver = ["clickhouse-driver (>=0.2.0)"]
+django = ["django (>=1.8)"]
+falcon = ["falcon (>=1.4)"]
+fastapi = ["fastapi (>=0.79.0)"]
+flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
+grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
+http2 = ["httpcore[http2] (==1.*)"]
+httpx = ["httpx (>=0.16.0)"]
+huey = ["huey (>=2)"]
+huggingface-hub = ["huggingface_hub (>=0.22)"]
+langchain = ["langchain (>=0.0.210)"]
+launchdarkly = ["launchdarkly-server-sdk (>=9.8.0)"]
+litestar = ["litestar (>=2.0.0)"]
+loguru = ["loguru (>=0.5)"]
+openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
+openfeature = ["openfeature-sdk (>=0.7.1)"]
+opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
+opentelemetry-experimental = ["opentelemetry-distro"]
+pure-eval = ["asttokens", "executing", "pure_eval"]
+pymongo = ["pymongo (>=3.1)"]
+pyspark = ["pyspark (>=2.4.4)"]
+quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
+rq = ["rq (>=0.6)"]
+sanic = ["sanic (>=0.8)"]
+sqlalchemy = ["sqlalchemy (>=1.2)"]
+starlette = ["starlette (>=0.19.1)"]
+starlite = ["starlite (>=1.48)"]
+tornado = ["tornado (>=6)"]
+unleash = ["UnleashClient (>=6.0.1)"]
+
[[package]]
name = "shellingham"
version = "1.5.4"
description = "Tool to Detect Surrounding Shell"
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
files = [
{file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"},
{file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"},
@@ -1580,7 +1551,6 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
-groups = ["main", "dev"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -1592,7 +1562,6 @@ version = "2.4.0"
description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set"
optional = false
python-versions = "*"
-groups = ["dev"]
files = [
{file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"},
{file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"},
@@ -1604,7 +1573,6 @@ version = "0.5.3"
description = "A non-validating SQL parser."
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
{file = "sqlparse-0.5.3-py3-none-any.whl", hash = "sha256:cf2196ed3418f3ba5de6af7e82c694a9fbdbfecccdfc72e281548517081f16ca"},
{file = "sqlparse-0.5.3.tar.gz", hash = "sha256:09f67787f56a0b16ecdbde1bfc7f5d9c3371ca683cfeaa8e6ff60b4807ec9272"},
@@ -1620,8 +1588,6 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
-markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -1663,7 +1629,6 @@ version = "0.13.2"
description = "Style preserving TOML library"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"},
{file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"},
@@ -1675,7 +1640,6 @@ version = "2025.1.15.22"
description = "Canonical source for classifiers on PyPI (pypi.org)."
optional = false
python-versions = "*"
-groups = ["dev"]
files = [
{file = "trove_classifiers-2025.1.15.22-py3-none-any.whl", hash = "sha256:5f19c789d4f17f501d36c94dbbf969fb3e8c2784d008e6f5164dd2c3d6a2b07c"},
{file = "trove_classifiers-2025.1.15.22.tar.gz", hash = "sha256:90af74358d3a01b3532bc7b3c88d8c6a094c2fd50a563d13d9576179326d7ed9"},
@@ -1687,8 +1651,6 @@ version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
-markers = "python_version < \"3.11\""
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
@@ -1700,8 +1662,6 @@ version = "2025.1"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
-groups = ["main"]
-markers = "sys_platform == \"win32\""
files = [
{file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"},
{file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"},
@@ -1713,7 +1673,6 @@ version = "2.3.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"},
{file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"},
@@ -1731,8 +1690,6 @@ version = "6.1.0"
description = "Python client for Valkey forked from redis-py"
optional = true
python-versions = ">=3.8"
-groups = ["main"]
-markers = "extra == \"valkey\""
files = [
{file = "valkey-6.1.0-py3-none-any.whl", hash = "sha256:cfe769edae894f74ac946eff1e93f7d7f466032c3030ba7e9d089a742459ac9c"},
{file = "valkey-6.1.0.tar.gz", hash = "sha256:a652df15ed89c41935ffae6dfd09c56f4a9ab80b592e5ed9204d538e2ddad6d3"},
@@ -1751,7 +1708,6 @@ version = "20.29.2"
description = "Virtual Python Environment builder"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "virtualenv-20.29.2-py3-none-any.whl", hash = "sha256:febddfc3d1ea571bdb1dc0f98d7b45d24def7428214d4fb73cc486c9568cce6a"},
{file = "virtualenv-20.29.2.tar.gz", hash = "sha256:fdaabebf6d03b5ba83ae0a02cfe96f48a716f4fae556461d180825866f75b728"},
@@ -1772,8 +1728,6 @@ version = "1.1.4"
description = "Python wrapper for extended filesystem attributes"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
-markers = "sys_platform == \"darwin\""
files = [
{file = "xattr-1.1.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:acb85b6249e9f3ea10cbb56df1021d43f4027212f0d004304bc9075dc7f54769"},
{file = "xattr-1.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1a848ab125c0fafdc501ccd83b4c9018bba576a037a4ca5960a22f39e295552e"},
@@ -1859,8 +1813,6 @@ version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
-markers = "python_version < \"3.12\""
files = [
{file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
{file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
@@ -1875,10 +1827,11 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
type = ["pytest-mypy"]
[extras]
+sentry = ["sentry-sdk"]
valkey = ["valkey"]
yaml = ["pyyaml"]
[metadata]
-lock-version = "2.1"
+lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "902af24e29726fb68e80f9fcc5f54dbf9e543537c7e5575d47899c61bc21e729"
+content-hash = "9737e5185c7efdc9ead3d41e26c4f2d59cb3abd7b91b42ad4cc1a05e9b87d8a5"
diff --git a/pyproject.toml b/pyproject.toml
index 81d11d1..2df4e65 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,9 +45,9 @@ python = "^3.10"
django = ">=5"
croniter = ">=2.0"
click = "^8.1"
-rq = "^1.16"
pyyaml = { version = "^6.0", optional = true }
valkey = { version = "^6.0.2", optional = true}
+sentry-sdk = { version = "^2.19", optional = true }
[tool.poetry.dev-dependencies]
poetry = "^2.0.1"
@@ -61,6 +61,7 @@ freezegun = "^1.5"
[tool.poetry.extras]
yaml = ["pyyaml"]
valkey = ["valkey"]
+sentry = ["sentry-sdk"]
[tool.flake8]
max-line-length = 120
diff --git a/scheduler/_config_types.py b/scheduler/_config_types.py
new file mode 100644
index 0000000..05103e1
--- /dev/null
+++ b/scheduler/_config_types.py
@@ -0,0 +1,81 @@
+from dataclasses import dataclass
+from enum import Enum
+from typing import Callable, Dict, Optional, List, Tuple, Any, Self, Type
+
+from scheduler.helpers.timeouts import BaseDeathPenalty, UnixSignalDeathPenalty
+
+
+@dataclass
+class QueueConfiguration:
+ __CONNECTION_FIELDS__ = {
+ "URL",
+ "DB",
+ "UNIX_SOCKET_PATH",
+ "HOST",
+ "PORT",
+ "PASSWORD",
+ "SENTINELS",
+ "MASTER_NAME",
+ "SOCKET_TIMEOUT",
+ "SSL",
+ "CONNECTION_KWARGS",
+ }
+ DB: Optional[int] = None
+ CLIENT_KWARGS: Optional[Dict[str, Any]] = None
+
+    # Broker connection parameters: provide one of UNIX_SOCKET_PATH, URL, or host-based params (HOST, PORT, PASSWORD)
+ UNIX_SOCKET_PATH: Optional[str] = None
+
+ URL: Optional[str] = None
+
+ HOST: Optional[str] = None
+ PORT: Optional[int] = None
+ USERNAME: Optional[str] = None
+ PASSWORD: Optional[str] = None
+
+ SSL: Optional[bool] = False
+ SSL_CERT_REQS: Optional[str] = "required"
+
+ DEFAULT_TIMEOUT: Optional[int] = None
+ ASYNC: Optional[bool] = True
+
+ SENTINELS: Optional[List[Tuple[str, int]]] = None
+ SENTINEL_KWARGS: Optional[Dict[str, str]] = None
+ SOCKET_TIMEOUT: Optional[int] = None
+ MASTER_NAME: Optional[str] = None
+ CONNECTION_KWARGS: Optional[Dict[str, Any]] = None
+
+ def same_connection_params(self, other: Self) -> bool:
+ for field in self.__CONNECTION_FIELDS__:
+ if getattr(self, field) != getattr(other, field):
+ return False
+ return True
+
+
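+# Illustrative usage (a sketch, not part of this change's behavior): two configurations pointing at
+# the same broker compare equal on connection fields even when unrelated settings differ, e.g.
+#
+#     a = QueueConfiguration(HOST="localhost", PORT=6379, DB=0)
+#     b = QueueConfiguration(HOST="localhost", PORT=6379, DB=0, DEFAULT_TIMEOUT=500)
+#     assert a.same_connection_params(b)
+
+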
+class Broker(Enum):
+ REDIS = "redis"
+ FAKEREDIS = "fakeredis"
+ VALKEY = "valkey"
+
+
+def _token_validation(token: str) -> bool:
+ return False
+
+
+@dataclass
+class SchedulerConfig:
+ EXECUTIONS_IN_PAGE: int = 20
+ SCHEDULER_INTERVAL: int = 10
+ BROKER: Broker = Broker.REDIS
+ TOKEN_VALIDATION_METHOD: Callable[[str], bool] = _token_validation
+    CALLBACK_TIMEOUT: int = 60  # Callback timeout in seconds (success/failure)
+    # Default values, can be overridden per task
+ DEFAULT_RESULT_TTL: int = 500 # Time To Live (TTL) in seconds to keep job results
+ DEFAULT_FAILURE_TTL: int = 31536000 # Time To Live (TTL) in seconds to keep job failure information
+    DEFAULT_JOB_TIMEOUT: int = 300  # Timeout (seconds) for a job
+ # General configuration values
+    DEFAULT_WORKER_TTL: int = 420  # Time To Live (TTL) in seconds to keep worker information after last heartbeat
+    DEFAULT_MAINTENANCE_TASK_INTERVAL: int = 10 * 60  # The interval to run maintenance tasks in seconds. 10 minutes.
+    DEFAULT_JOB_MONITORING_INTERVAL: int = 30  # The interval to monitor jobs in seconds.
+ SCHEDULER_FALLBACK_PERIOD_SECS: int = 120 # Period (secs) to wait before requiring to reacquire locks
+    DEATH_PENALTY_CLASS: Type[BaseDeathPenalty] = UnixSignalDeathPenalty
\ No newline at end of file
diff --git a/scheduler/admin/task_admin.py b/scheduler/admin/task_admin.py
index 4e22905..04fe265 100644
--- a/scheduler/admin/task_admin.py
+++ b/scheduler/admin/task_admin.py
@@ -1,13 +1,23 @@
+from typing import List
+
from django.contrib import admin, messages
from django.contrib.contenttypes.admin import GenericStackedInline
from django.utils.translation import gettext_lazy as _
-from scheduler import tools
+from scheduler.helpers import tools
from scheduler.broker_types import ConnectionErrorTypes
from scheduler.models.args import TaskArg, TaskKwarg
from scheduler.models.task import Task
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
from scheduler.settings import SCHEDULER_CONFIG, logger
-from scheduler.tools import get_job_executions_for_task, TaskType
+
+
+def get_job_executions_for_task(queue_name, scheduled_task) -> List[JobModel]:
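+    """Return the queued jobs that are executions of the given scheduled task."""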
+ queue = get_queue(queue_name)
+ job_list: List[JobModel] = JobModel.get_many(queue.queued_job_registry.all(), connection=queue.connection)
+ res = list(filter(lambda j: j.is_execution_of(scheduled_task), job_list))
+ return res
class JobArgInline(GenericStackedInline):
@@ -105,11 +115,11 @@ class Media:
@admin.display(description="Schedule")
def task_schedule(self, o: Task) -> str:
- if o.task_type == TaskType.ONCE.value:
+ if o.task_type == tools.TaskType.ONCE.value:
return f"Run once: {o.scheduled_time:%Y-%m-%d %H:%M:%S}"
- elif o.task_type == TaskType.CRON.value:
+ elif o.task_type == tools.TaskType.CRON.value:
return f"Cron: {o.cron_string}"
- elif o.task_type == TaskType.REPEATABLE.value:
+ elif o.task_type == tools.TaskType.REPEATABLE.value:
if o.interval is None or o.interval_unit is None:
return ""
return "Repeatable: {} {}".format(o.interval, o.get_interval_unit_display())
@@ -160,8 +170,9 @@ def disable_selected(self, request, queryset):
rows_updated += 1
level = messages.WARNING if not rows_updated else messages.INFO
- self.message_user(request, f"{get_message_bit(rows_updated)} successfully disabled and unscheduled.",
- level=level)
+ self.message_user(
+ request, f"{get_message_bit(rows_updated)} successfully disabled and unscheduled.", level=level
+ )
@admin.action(description=_("Enable selected %(verbose_name_plural)s"), permissions=("change",))
def enable_selected(self, request, queryset):
diff --git a/scheduler/broker_types.py b/scheduler/broker_types.py
index e162e7b..56693ec 100644
--- a/scheduler/broker_types.py
+++ b/scheduler/broker_types.py
@@ -1,6 +1,7 @@
# This is a helper module to obfuscate types used by different broker implementations.
from collections import namedtuple
-from typing import Union, Dict, Tuple, Type
+from typing import Any, Callable, TypeVar, Union
+from typing import Dict, Tuple
import redis
@@ -11,13 +12,16 @@
valkey.Valkey = redis.Redis
valkey.StrictValkey = redis.StrictRedis
-from scheduler.settings import Broker
+from scheduler._config_types import Broker
ConnectionErrorTypes = (redis.ConnectionError, valkey.ConnectionError)
ResponseErrorTypes = (redis.ResponseError, valkey.ResponseError)
+TimeoutErrorType = Union[redis.TimeoutError, valkey.TimeoutError]
+WatchErrorType = Union[redis.WatchError, valkey.WatchError]
ConnectionType = Union[redis.Redis, valkey.Valkey]
PipelineType = Union[redis.client.Pipeline, valkey.client.Pipeline]
SentinelType = Union[redis.sentinel.Sentinel, valkey.sentinel.Sentinel]
+FunctionReferenceType = TypeVar("FunctionReferenceType", str, Callable[..., Any])
BrokerMetaDataType = namedtuple("BrokerMetaDataType", ["connection_type", "sentinel_type", "ssl_prefix"])
@@ -28,3 +32,10 @@
(Broker.REDIS, True): BrokerMetaDataType(redis.StrictRedis, redis.sentinel.Sentinel, "rediss"),
(Broker.VALKEY, True): BrokerMetaDataType(valkey.StrictValkey, valkey.sentinel.Sentinel, "valkeys"),
}
+
+MODEL_NAMES = ["Task", ]
+TASK_TYPES = ["OnceTaskType", "RepeatableTaskType", "CronTaskType"]
+
+
+def is_pipeline(conn: ConnectionType) -> bool:
+ return isinstance(conn, redis.client.Pipeline) or isinstance(conn, valkey.client.Pipeline)
diff --git a/scheduler/decorators.py b/scheduler/decorators.py
index c8f7e94..6e9d723 100644
--- a/scheduler/decorators.py
+++ b/scheduler/decorators.py
@@ -1,43 +1,108 @@
+from functools import wraps
+from typing import Any, Callable, Dict, List, Optional, Union
+
from scheduler import settings
-from .queues import get_queue, QueueNotFoundError
-from .rq_classes import rq_job_decorator
+from scheduler.helpers.queues import Queue, get_queue
+from .broker_types import ConnectionType
+from .redis_models import Callback
JOB_METHODS_LIST = list()
-def job(*args, **kwargs):
- """
- The same as rq package's job decorator, but it automatically works out
- the ``connection`` argument from SCHEDULER_QUEUES.
+class job:
+ queue_class = Queue
+
+ def __init__(
+ self,
+ queue: Union["Queue", str, None] = None,
+ connection: Optional[ConnectionType] = None,
+ timeout: Optional[int] = settings.SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT,
+ result_ttl: int = settings.SCHEDULER_CONFIG.DEFAULT_RESULT_TTL,
+ ttl: Optional[int] = None,
+ at_front: bool = False,
+ meta: Optional[Dict[Any, Any]] = None,
+ description: Optional[str] = None,
+ retries_left: Optional[int] = None,
+ retry_intervals: Union[int, List[int], None] = None,
+ on_failure: Optional[Union[Callback, Callable[..., Any]]] = None,
+ on_success: Optional[Union[Callback, Callable[..., Any]]] = None,
+ on_stopped: Optional[Union[Callback, Callable[..., Any]]] = None,
+ ):
+        """A decorator that adds a ``delay`` method to the decorated function, which in turn creates a job when
+        called. Accepts an optional ``queue`` argument that can be either a ``Queue`` instance or a string
+        denoting the queue name (defaults to ``"default"``). For example::
- And also, it allows simplified ``@job`` syntax to put a job into the default queue.
- """
- if len(args) == 0:
- func = None
- queue = "default"
- else:
- if callable(args[0]):
- func = args[0]
+    >>> @job(queue='default')
+    ... def simple_add(x, y):
+    ...     return x + y
+    ...
+    >>> # Puts `simple_add` function into queue
+    >>> simple_add.delay(1, 2)
+
+    :param queue: The queue to use; can be a ``Queue`` instance or the queue name (str)
+ :type queue: Union['Queue', str]
+ :param connection: Broker Connection
+ :param timeout: Job timeout
+ :param result_ttl: Result time to live
+ :param ttl: Time to live for job execution
+ :param at_front: Whether to enqueue the job at front of the queue
+    :param meta: Arbitrary metadata about the job
+ :param description: Job description
+ :param retries_left: Number of retries left
+ :param retry_intervals: Retry intervals
+ :param on_failure: Callable to run on failure
+ :param on_success: Callable to run on success
+ :param on_stopped: Callable to run when stopped
+ """
+ if queue is None:
queue = "default"
- else:
- func = None
- queue = args[0]
- args = args[1:]
-
- if isinstance(queue, str):
- try:
- queue = get_queue(queue)
- if "connection" not in kwargs:
- kwargs["connection"] = queue.connection
- except KeyError:
- raise QueueNotFoundError(f"Queue {queue} does not exist")
-
- kwargs.setdefault("result_ttl", settings.SCHEDULER_CONFIG.DEFAULT_RESULT_TTL)
- kwargs.setdefault("timeout", settings.SCHEDULER_CONFIG.DEFAULT_TIMEOUT)
-
- decorator = rq_job_decorator(queue, *args, **kwargs)
- if func:
- JOB_METHODS_LIST.append(f"{func.__module__}.{func.__name__}")
- return decorator(func)
- return decorator
+ self.queue = get_queue(queue) if isinstance(queue, str) else queue
+ self.connection = connection
+ self.timeout = timeout
+ self.result_ttl = result_ttl
+ self.ttl = ttl
+ self.meta = meta
+ self.at_front = at_front
+ self.description = description
+ self.retries_left = retries_left
+ self.retry_intervals = retry_intervals
+ self.on_success = on_success
+ self.on_failure = on_failure
+ self.on_stopped = on_stopped
+
+ def __call__(self, f):
+ @wraps(f)
+ def delay(*args, **kwargs):
+ if isinstance(self.queue, str):
+ queue = Queue(name=self.queue, connection=self.connection)
+ else:
+ queue = self.queue
+
+ job_id = kwargs.pop("job_id", None)
+ at_front = kwargs.pop("at_front", False)
+
+ if not at_front:
+ at_front = self.at_front
+
+ return queue.enqueue_call(
+ f,
+ args=args,
+ kwargs=kwargs,
+ timeout=self.timeout,
+ result_ttl=self.result_ttl,
+ ttl=self.ttl,
+ name=job_id,
+ at_front=at_front,
+ meta=self.meta,
+ description=self.description,
+ retries_left=self.retries_left,
+ retry_intervals=self.retry_intervals,
+ on_failure=self.on_failure,
+ on_success=self.on_success,
+ on_stopped=self.on_stopped,
+ )
+
+ JOB_METHODS_LIST.append(f"{f.__module__}.{f.__name__}")
+ f.delay = delay
+ return f
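+
+
+# Illustrative usage (assumptions: "myapp.callbacks.notify" is a hypothetical dotted path and the
+# Callback helper accepts one): timeouts and callbacks can be attached at decoration time, e.g.
+#
+#     @job(queue="default", timeout=30, on_success=Callback("myapp.callbacks.notify"))
+#     def send_report(report_id):
+#         ...
+#
+#     send_report.delay(42)  # enqueues send_report(42) on the "default" queue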
diff --git a/scheduler/helpers/__init__.py b/scheduler/helpers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/helpers/callback.py b/scheduler/helpers/callback.py
new file mode 100644
index 0000000..1777c92
--- /dev/null
+++ b/scheduler/helpers/callback.py
@@ -0,0 +1,71 @@
+import importlib
+import inspect
+from typing import Union, Callable, Any, Optional
+
+from scheduler.settings import SCHEDULER_CONFIG
+from scheduler.helpers.timeouts import JobTimeoutException
+
+
+class Callback:
+ def __init__(self, func: Union[str, Callable[..., Any]], timeout: Optional[int] = None):
+ self.timeout = timeout or SCHEDULER_CONFIG.CALLBACK_TIMEOUT
+ if not isinstance(self.timeout, int):
+ raise ValueError("Callback `timeout` must be an int")
+ if not isinstance(func, str) and not inspect.isfunction(func) and not inspect.isbuiltin(func):
+ raise ValueError("Callback `func` must be a string or function")
+ if isinstance(func, str):
+ func = _import_attribute(func)
+ self.func: Callable[..., Any] = func
+
+ @property
+ def name(self) -> str:
+ return "{0}.{1}".format(self.func.__module__, self.func.__qualname__)
+
+ def __call__(self, *args, **kwargs):
+ with SCHEDULER_CONFIG.DEATH_PENALTY_CLASS(self.timeout, JobTimeoutException):
+ return self.func(*args, **kwargs)
+
+
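+# Illustrative usage (a sketch; `os.getcwd` is only a stand-in callable): a Callback resolves a
+# dotted-path string to a function and enforces its timeout via DEATH_PENALTY_CLASS when invoked:
+#
+#     cb = Callback("os.getcwd", timeout=5)
+#     cb()  # runs os.getcwd() with a 5-second timeout
+
+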
+def _import_attribute(name: str) -> Callable[..., Any]:
+ """Returns an attribute from a dotted path name. Example: `path.to.func`.
+
+    When the attribute we look for is a staticmethod, the module name in its dotted path is not the second-to-last
+    component (e.g. `package_a.package_b.module_a.ClassA.my_static_method`), so we strip components from the end of
+    the name until the remaining prefix can be imported as a module.
+
+ :param name: The name (reference) to the path.
+ :raises ValueError: If no module is found or invalid attribute name.
+ :returns: An attribute (normally a Callable)
+ """
+ name_bits = name.split(".")
+ module_name_bits, attribute_bits = name_bits[:-1], [name_bits[-1]]
+ module = None
+ while len(module_name_bits):
+ try:
+ module_name = ".".join(module_name_bits)
+ module = importlib.import_module(module_name)
+ break
+ except ImportError:
+ attribute_bits.insert(0, module_name_bits.pop())
+
+ if module is None:
+ # maybe it's a builtin
+ try:
+ return __builtins__[name]
+ except KeyError:
+ raise ValueError("Invalid attribute name: %s" % name)
+
+ attribute_name = ".".join(attribute_bits)
+ if hasattr(module, attribute_name):
+ return getattr(module, attribute_name)
+ # staticmethods
+ attribute_name = attribute_bits.pop()
+ attribute_owner_name = ".".join(attribute_bits)
+ try:
+ attribute_owner = getattr(module, attribute_owner_name)
+ except: # noqa
+ raise ValueError("Invalid attribute name: %s" % attribute_name)
+
+ if not hasattr(attribute_owner, attribute_name):
+ raise ValueError("Invalid attribute name: %s" % name)
+ return getattr(attribute_owner, attribute_name)
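+
+
+# Worked example (illustrative): `_import_attribute("os.path.join")` imports `os.path` and returns
+# its `join` attribute; a staticmethod reference such as `package_a.module_a.ClassA.my_method` is
+# resolved by stripping trailing components until an importable module prefix remains.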
diff --git a/scheduler/helpers/queues/__init__.py b/scheduler/helpers/queues/__init__.py
new file mode 100644
index 0000000..af2f09e
--- /dev/null
+++ b/scheduler/helpers/queues/__init__.py
@@ -0,0 +1,11 @@
+__all__ = [
+ "Queue",
+ "InvalidJobOperation",
+ "get_queue",
+ "get_all_workers",
+ "get_queues",
+ "perform_job",
+]
+
+from .getters import get_queue, get_all_workers, get_queues
+from .queue_logic import Queue, InvalidJobOperation, perform_job
diff --git a/scheduler/helpers/queues/getters.py b/scheduler/helpers/queues/getters.py
new file mode 100644
index 0000000..5d014ed
--- /dev/null
+++ b/scheduler/helpers/queues/getters.py
@@ -0,0 +1,109 @@
+from typing import List, Set
+
+from scheduler.broker_types import ConnectionErrorTypes, BrokerMetaData
+from scheduler.redis_models.worker import WorkerModel
+from scheduler.settings import (
+ SCHEDULER_CONFIG, get_queue_names, get_queue_configuration, QueueConfiguration, logger, Broker)
+from .queue_logic import Queue
+
+
+def _get_broker_connection(config: QueueConfiguration, use_strict_broker=False):
+ """
+    Returns a broker connection (Redis, Valkey, or fakeredis) built from a QueueConfiguration
+ """
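+    # Resolution order (per the branches below): the broker class comes from SCHEDULER_CONFIG.BROKER
+    # (fakeredis for testing); connection parameters are then tried as URL, UNIX_SOCKET_PATH,
+    # SENTINELS, and finally host/port.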
+ if SCHEDULER_CONFIG.BROKER == Broker.FAKEREDIS:
+ import fakeredis
+
+ broker_cls = fakeredis.FakeRedis if not use_strict_broker else fakeredis.FakeStrictRedis
+ else:
+ broker_cls = BrokerMetaData[(SCHEDULER_CONFIG.BROKER, use_strict_broker)].connection_type
+ logger.debug(f"Getting connection for {config}")
+ if config.URL:
+ ssl_url_protocol = BrokerMetaData[(SCHEDULER_CONFIG.BROKER, use_strict_broker)].ssl_prefix
+ if config.SSL or config.URL.startswith(f"{ssl_url_protocol}://"):
+ return broker_cls.from_url(
+ config.URL,
+ db=config.DB,
+ ssl_cert_reqs=config.SSL_CERT_REQS or "required",
+ )
+ else:
+ return broker_cls.from_url(
+ config.URL,
+ db=config.DB,
+ )
+ if config.UNIX_SOCKET_PATH:
+ return broker_cls(unix_socket_path=config.UNIX_SOCKET_PATH, db=config.DB)
+
+ if config.SENTINELS:
+ connection_kwargs = {
+ "db": config.DB,
+ "password": config.PASSWORD,
+ "username": config.USERNAME,
+ "socket_timeout": config.SOCKET_TIMEOUT,
+ }
+ connection_kwargs.update(config.CONNECTION_KWARGS or {})
+ sentinel_kwargs = config.SENTINEL_KWARGS or {}
+ SentinelClass = BrokerMetaData[(SCHEDULER_CONFIG.BROKER, use_strict_broker)].sentinel_type
+ sentinel = SentinelClass(config.SENTINELS, sentinel_kwargs=sentinel_kwargs, **connection_kwargs)
+ return sentinel.master_for(
+ service_name=config.MASTER_NAME,
+ redis_class=broker_cls,
+ )
+
+ return broker_cls(
+ host=config.HOST,
+ port=config.PORT,
+ db=config.DB,
+ username=config.USERNAME,
+ password=config.PASSWORD,
+ ssl=config.SSL,
+ ssl_cert_reqs=config.SSL_CERT_REQS,
+ **(config.CLIENT_KWARGS or {}),
+ )
+
+
+def _get_connection(queue_settings, use_strict_redis=False):
+ """Returns a Broker connection to use based on parameters in SCHEDULER_QUEUES"""
+ return _get_broker_connection(queue_settings, use_strict_redis)
+
+
+def get_queue(name="default", is_async=None, connection=None) -> Queue:
+    """Returns a Queue using parameters defined in `SCHEDULER_QUEUES`"""
+ queue_settings = get_queue_configuration(name)
+ is_async = is_async if is_async is not None else queue_settings.ASYNC
+ if connection is None:
+ connection = _get_connection(queue_settings)
+ return Queue(name=name, connection=connection, is_async=is_async)
+
+
+def get_all_workers() -> Set[WorkerModel]:
+ queue_names = get_queue_names()
+
+ workers_set: Set[WorkerModel] = set()
+ for queue_name in queue_names:
+ connection = _get_connection(get_queue_configuration(queue_name))
+ try:
+ curr_workers: Set[WorkerModel] = set(WorkerModel.all(connection=connection))
+ workers_set.update(curr_workers)
+ except ConnectionErrorTypes as e:
+ logger.error(f"Could not connect for queue {queue_name}: {e}")
+ return workers_set
+
+
+def get_queues(*queue_names, **kwargs) -> List[Queue]:
+ """Return queue instances from specified queue names. All instances must use the same Broker connection."""
+
+ queue_config = get_queue_configuration(queue_names[0])
+ queues = [get_queue(queue_names[0], **kwargs)]
+ # perform consistency checks while building return list
+ for queue_name in queue_names[1:]:
+ curr_queue_config = get_queue_configuration(queue_name)
+ if not queue_config.same_connection_params(curr_queue_config):
+ raise ValueError(
+ f'Queues must have the same broker connection. "{queue_name}" and'
+ f' "{queue_names[0]}" have different connections'
+ )
+ queue = get_queue(queue_name, **kwargs)
+ queues.append(queue)
+
+ return queues
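+
+
+# Illustrative usage (queue names are hypothetical and must be defined in SCHEDULER_QUEUES with the
+# same broker connection parameters):
+#
+#     default_queue = get_queue("default")
+#     default_queue, high_queue = get_queues("default", "high")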
diff --git a/scheduler/helpers/queues/queue_logic.py b/scheduler/helpers/queues/queue_logic.py
new file mode 100644
index 0000000..826d382
--- /dev/null
+++ b/scheduler/helpers/queues/queue_logic.py
@@ -0,0 +1,497 @@
+import asyncio
+import sys
+import traceback
+from datetime import datetime, timedelta
+from functools import total_ordering
+from typing import Dict, List, Optional, Tuple, Union, Self, Any
+
+from redis import WatchError
+
+from scheduler.broker_types import ConnectionType, FunctionReferenceType
+from scheduler.helpers.utils import utcnow, current_timestamp
+from scheduler.redis_models import as_str, JobStatus, Callback, SchedulerLock, Result, ResultType, JobModel
+from scheduler.redis_models.registry import (
+ JobNamesRegistry, FinishedJobRegistry, StartedJobRegistry,
+ FailedJobRegistry,
+ CanceledJobRegistry, ScheduledJobRegistry, QueuedJobRegistry,
+ NoSuchJobError, )
+from scheduler.settings import logger, SCHEDULER_CONFIG
+
+
+class InvalidJobOperation(Exception):
+ pass
+
+
+def perform_job(job_model: JobModel, connection: ConnectionType) -> Any: # noqa
+ """The main execution method. Invokes the job function with the job arguments.
+
+ :returns: The job's return value
+ """
+ job_model.persist(connection=connection)
+ _job_stack.append(job_model)
+ try:
+ result = job_model.func(*job_model.args, **job_model.kwargs)
+ if asyncio.iscoroutine(result):
+ loop = asyncio.new_event_loop()
+ coro_result = loop.run_until_complete(result)
+ result = coro_result
+ finally:
+ assert job_model is _job_stack.pop()
+ return result
+
+
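+# Jobs currently being performed in this process; perform_job pushes the job before invoking it and
+# asserts it is popped afterwards.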
+_job_stack = []
+
+
+@total_ordering
+class Queue:
+ REGISTRIES = dict(
+ finished="finished_job_registry",
+ failed="failed_job_registry",
+ scheduled="scheduled_job_registry",
+ started="started_job_registry",
+ canceled="canceled_job_registry",
+ queued="queued_job_registry",
+ )
+
+ def __init__(self, connection: Optional[ConnectionType], name: str, is_async: bool = True) -> None:
+ """Initializes a Queue object.
+
+ :param name: The queue name
+ :param connection: Broker connection
+ :param is_async: Whether jobs should run "async" (using the worker).
+ """
+ self.connection = connection
+ self.name = name
+ self._is_async = is_async
+ self.queued_job_registry = QueuedJobRegistry(connection=self.connection, name=self.name)
+ self.started_job_registry = StartedJobRegistry(connection=self.connection, name=self.name)
+ self.failed_job_registry = FailedJobRegistry(connection=self.connection, name=self.name)
+ self.finished_job_registry = FinishedJobRegistry(connection=self.connection, name=self.name)
+ self.scheduled_job_registry = ScheduledJobRegistry(connection=self.connection, name=self.name)
+ self.canceled_job_registry = CanceledJobRegistry(connection=self.connection, name=self.name)
+
+ def __len__(self):
+ return self.count
+
+ @property
+    def scheduler_pid(self) -> Optional[int]:
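+        """Returns the PID of the scheduler process holding the scheduler lock for this queue, or None if there is none."""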
+ lock = SchedulerLock(self.name)
+ pid = lock.value(self.connection)
+ return int(pid.decode()) if pid is not None else None
+
+ def clean_registries(self, timestamp: Optional[float] = None) -> None:
+ """Remove abandoned jobs from registry and add them to FailedJobRegistry.
+
+        Removes started jobs whose expiry time is earlier than the given timestamp (seconds since the Unix epoch).
+        Abandoned jobs are either retried (if they have retries left) or added to the failed job registry.
+ """
+ before_score = timestamp or current_timestamp()
+ started_jobs: List[Tuple[str, float]] = self.started_job_registry.get_job_names_before(
+ self.connection, before_score)
+
+ with self.connection.pipeline() as pipeline:
+ for job_name, job_score in started_jobs:
+ job = JobModel.get(job_name, connection=self.connection)
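+                # Skip jobs that no longer exist, have no failure callback, or have not yet exceeded their timeout.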
+ if job is None or job.failure_callback is None or job_score + job.timeout > before_score:
+ continue
+
+ logger.debug(f"Running failure callbacks for {job.name}")
+ try:
+ job.failure_callback(job, self.connection, traceback.extract_stack())
+ except Exception: # noqa
+ logger.exception(f"Job {self.name}: error while executing failure callback")
+ raise
+
+ retry = job.retries_left and job.retries_left > 0
+
+ if retry:
+ self.retry_job(job, pipeline)
+
+ else:
+ logger.warning(
+ f"{self.__class__.__name__} cleanup: Moving job to {self.failed_job_registry.key} "
+ f"(due to AbandonedJobError)"
+ )
+ job.set_status(JobStatus.FAILED, connection=pipeline)
+ exc_string = f"Moved to {self.failed_job_registry.key}, due to AbandonedJobError, at {datetime.now()}"
+ job.save(connection=pipeline)
+ job.expire(ttl=-1, connection=pipeline)
+ score = current_timestamp() + SCHEDULER_CONFIG.DEFAULT_FAILURE_TTL
+ Result.create(
+ connection=pipeline,
+ job_name=job.name,
+ _type=ResultType.FAILED,
+ ttl=SCHEDULER_CONFIG.DEFAULT_FAILURE_TTL,
+ exc_string=exc_string,
+ )
+ self.failed_job_registry.add(pipeline, job.name, score)
+ job.save(connection=pipeline)
+ job.expire(connection=pipeline, ttl=SCHEDULER_CONFIG.DEFAULT_FAILURE_TTL)
+
+ for registry in self.REGISTRIES.values():
+ getattr(self, registry).cleanup(connection=self.connection, timestamp=before_score)
+ pipeline.execute()
+
+ def first_queued_job_name(self) -> Optional[str]:
+ return self.queued_job_registry.get_first()
+
+ def empty(self):
+ """Removes all queued jobs from the queue."""
+ queued_jobs_count = self.queued_job_registry.count(connection=self.connection)
+ with self.connection.pipeline() as pipe:
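+            # Remove queued jobs in batches of up to 1000 to keep each pipeline call bounded.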
+ for offset in range(0, queued_jobs_count, 1000):
+ job_names = self.queued_job_registry.all(offset, 1000)
+ for job_name in job_names:
+ self.queued_job_registry.delete(connection=pipe, job_name=job_name)
+ JobModel.delete_many(job_names, connection=pipe)
+ pipe.execute()
+
+ @property
+ def count(self) -> int:
+ """Returns a count of all messages in the queue."""
+ res = 0
+ for registry in self.REGISTRIES.values():
+ res += getattr(self, registry).count(connection=self.connection)
+ return res
+
+ def get_registry(self, name: str) -> Union[None, JobNamesRegistry]:
+ name = name.lower()
+ if name in Queue.REGISTRIES:
+ return getattr(self, Queue.REGISTRIES[name])
+ return None
+
+ def get_all_job_ids(self) -> List[str]:
+ res = list()
+ res.extend(self.queued_job_registry.all())
+ res.extend(self.finished_job_registry.all())
+ res.extend(self.started_job_registry.all())
+ res.extend(self.failed_job_registry.all())
+ res.extend(self.scheduled_job_registry.all())
+ res.extend(self.canceled_job_registry.all())
+ return res
+
+ def get_all_jobs(self) -> List[JobModel]:
+ job_ids = self.get_all_job_ids()
+ return JobModel.get_many(job_ids, connection=self.connection)
+
+ def enqueue_call(
+ self,
+ func: FunctionReferenceType,
+ args: Union[Tuple, List, None] = None,
+ kwargs: Optional[Dict] = None,
+ timeout: Optional[int] = None,
+ result_ttl: Optional[int] = None,
+ ttl: Optional[int] = None,
+ description: Optional[str] = None,
+ name: Optional[str] = None,
+ at_front: bool = False,
+ meta: Optional[Dict] = None,
+ retries_left: Optional[int] = None,
+ retry_intervals: Union[int, List[int], None] = None,
+ on_success: Optional[Callback] = None,
+ on_failure: Optional[Callback] = None,
+ on_stopped: Optional[Callback] = None,
+ task_type: Optional[str] = None,
+ scheduled_task_id: Optional[int] = None,
+ pipeline: Optional[ConnectionType] = None,
+ ) -> JobModel:
+ """Creates a job to represent the delayed function call and enqueues it.
+
+ :param func: The reference to the function
+ :param args: The `*args` to pass to the function
+ :param kwargs: The `**kwargs` to pass to the function
+ :param timeout: Function timeout
+ :param result_ttl: Result time to live
+ :param ttl: Time to live
+ :param description: The job description
+ :param name: The job name
+ :param at_front: Whether to enqueue the job at the front
+ :param meta: Metadata to attach to the job
+ :param retries_left: Number of retries left
+ :param retry_intervals: List of retry intervals
+ :param on_success: Callback for on success
+ :param on_failure: Callback for on failure
+ :param on_stopped: Callback for on stopped
+ :param task_type: The task type
+ :param scheduled_task_id: The scheduled task id
+ :param pipeline: The Redis Pipeline
+ :returns: The enqueued Job
+ """
+
+ job_model = JobModel.create(
+ connection=self.connection,
+ func=func,
+ args=args,
+ kwargs=kwargs,
+ result_ttl=result_ttl,
+ ttl=ttl,
+ description=description,
+ name=name,
+ meta=meta,
+ status=JobStatus.QUEUED,
+ timeout=timeout,
+ retries_left=retries_left,
+ retry_intervals=retry_intervals,
+ on_success=on_success,
+ on_failure=on_failure,
+ on_stopped=on_stopped,
+ queue_name=self.name,
+ task_type=task_type,
+ scheduled_task_id=scheduled_task_id,
+ )
+ job_model = self._enqueue_job(job_model, connection=pipeline, at_front=at_front)
+ return job_model
+
+ def run_job(self, job_model: JobModel) -> Any:
+ """Run the job
+ :param job_model: The job to run
+ :returns: The job result
+ """
+ result = perform_job(job_model, self.connection)
+ result_ttl = job_model.result_ttl or SCHEDULER_CONFIG.DEFAULT_RESULT_TTL
+ with self.connection.pipeline() as pipeline:
+ self.job_handle_success(job_model, result=result, result_ttl=result_ttl, connection=pipeline)
+ job_model.expire(result_ttl, connection=pipeline)
+ pipeline.execute()
+ return result
+
+ def job_handle_success(self, job: JobModel, result: Any, result_ttl: int, connection: ConnectionType):
+ """Saves and cleanup job after successful execution"""
+ job.set_status(JobStatus.FINISHED, connection=connection)
+ job.save(connection=connection)
+ Result.create(connection, job_name=job.name, _type=ResultType.SUCCESSFUL, return_value=result, ttl=result_ttl)
+
+ if result_ttl != 0:
+ self.finished_job_registry.add(connection, job.name, current_timestamp() + result_ttl)
+
+ def job_handle_failure(self, job: JobModel, exc_string: str, connection: ConnectionType):
+ # Does not set job status since the job might be stopped
+ score = current_timestamp() + SCHEDULER_CONFIG.DEFAULT_FAILURE_TTL
+ self.failed_job_registry.add(connection, job.name, score)
+ Result.create(connection, job.name, ResultType.FAILED, SCHEDULER_CONFIG.DEFAULT_FAILURE_TTL,
+ exc_string=exc_string)
+
+ def enqueue_at(self, when: datetime, f, *args, **kwargs) -> JobModel:
+ """Schedules a job to be enqueued at specified time
+ :param when: The time to enqueue the job
+ :param f: The function to call
+ :param args: The `*args` to pass to the function
+ :param kwargs: The `**kwargs` to pass to the function
+ :returns: The enqueued Job
+ """
+ job_model = JobModel.create(
+ connection=self.connection,
+ queue_name=self.name,
+ func=f,
+ status=JobStatus.SCHEDULED,
+ *args, **kwargs
+ )
+ job_model.save(connection=self.connection)
+ self.scheduled_job_registry.schedule(self.connection, job_model, when)
+ return job_model
+
+ def retry_job(self, job: JobModel, connection: ConnectionType):
+ """Requeue or schedule this job for execution.
+        If a `retry_interval` was set on the job itself, a scheduled time is calculated for the job
+        and, instead of being enqueued immediately, the job is scheduled for that time.
+
+        Args:
+            job (JobModel): The job to retry
+            connection (ConnectionType): The broker pipeline to use
+ """
+ number_of_intervals = len(job.retry_intervals)
+ index = max(number_of_intervals - job.retries_left, 0)
+ retry_interval = job.retry_intervals[index]
+ job.retries_left = job.retries_left - 1
+ if retry_interval:
+ scheduled_datetime = utcnow() + timedelta(seconds=retry_interval)
+ job.set_status(JobStatus.SCHEDULED, connection=connection)
+ job.save(connection=connection)
+ self.scheduled_job_registry.schedule(connection, job, scheduled_datetime)
+ else:
+ self._enqueue_job(job, connection=connection)
+
+ def _enqueue_job(
+ self,
+ job_model: JobModel,
+ connection: Optional[ConnectionType] = None,
+ at_front: bool = False) -> JobModel:
+ """Enqueues a job for delayed execution without checking dependencies.
+
+ If Queue is instantiated with is_async=False, job is executed immediately.
+ :param job_model: The job redis model
+ :param connection: The Redis Pipeline
+ :param at_front: Whether to enqueue the job at the front
+
+ :returns: The enqueued JobModel
+ """
+
+ pipe = connection if connection is not None else self.connection.pipeline()
+
+ # Add Queue key set
+ job_model.status = JobStatus.QUEUED
+ job_model.enqueued_at = utcnow()
+ job_model.save(connection=pipe)
+ job_model.expire(ttl=job_model.ttl, connection=pipe)
+
+ if self._is_async:
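+            # The score is used to order jobs within the queued-jobs registry.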
+ if at_front:
+ score = current_timestamp()
+ else:
+ score = self.queued_job_registry.get_last_timestamp() or current_timestamp()
+ self.queued_job_registry.add(connection=pipe, score=score, member=job_model.name)
+ result = pipe.execute()
+ logger.debug(f"Pushed job {job_model.name} into {self.name}, {result[3]} job(s) are in queue.")
+ else: # sync mode
+ job_model = self.run_sync(job_model)
+
+ return job_model
+
+ def run_sync(self, job: JobModel) -> JobModel:
+ """Run a job synchronously, meaning on the same process the method was called."""
+ job.prepare_for_execution("sync", self.started_job_registry, self.connection)
+
+ try:
+ result = self.run_job(job)
+ except: # noqa
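+            # Record the failure (status, failed-job registry, result) and invoke the failure callback, if any.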
+ with self.connection.pipeline() as pipeline:
+ job.set_status(JobStatus.FAILED, connection=pipeline)
+ exc_string = "".join(traceback.format_exception(*sys.exc_info()))
+ self.job_handle_failure(job, exc_string, pipeline)
+ pipeline.execute()
+
+ if job.failure_callback:
+ job.failure_callback(job, self.connection, *sys.exc_info()) # type: ignore
+ else:
+ if job.success_callback:
+ job.success_callback(job, self.connection, result) # type: ignore
+
+ return job
+
+ @classmethod
+ def dequeue_any(
+ cls,
+ queues: List[Self],
+ timeout: Optional[int],
+ connection: Optional[ConnectionType] = None,
+ ) -> Tuple[Optional[JobModel], Optional[Self]]:
+ """Class method returning a Job instance at the front of the given set of Queues, where the order of the queues
+ is important.
+
+ When all the Queues are empty, depending on the `timeout` argument, either blocks execution of this function
+ for the duration of the timeout or until new messages arrive on any of the queues, or returns None.
+
+ :param queues: List of Queue objects
+ :param timeout: Timeout for the pop operation
+ :param connection: Broker Connection
+ :returns: Tuple of Job, Queue
+ """
+
+ while True:
+ registries = [q.queued_job_registry for q in queues]
+
+ result = QueuedJobRegistry.pop(connection, registries, timeout)
+ if result == (None, None):
+ return None, None
+
+ registry_key, job_name = map(as_str, result)
+ queue = next(filter(lambda q: q.queued_job_registry.key == registry_key, queues), None)
+ if queue is None:
+ logger.warning(f"Could not find queue for registry key {registry_key} in queues")
+ return None, None
+
+ job = JobModel.get(job_name, connection=connection)
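+            # The job data may have expired between the pop and this fetch; skip it and keep dequeuing.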
+ if job is None:
+ continue
+ return job, queue
+ return None, None
+
+ def __eq__(self, other: Self) -> bool:
+ if not isinstance(other, Queue):
+ raise TypeError("Cannot compare queues to other objects")
+ return self.name == other.name
+
+ def __lt__(self, other: Self) -> bool:
+ if not isinstance(other, Queue):
+ raise TypeError("Cannot compare queues to other objects")
+ return self.name < other.name
+
+ def __hash__(self) -> int:
+ return hash(self.name)
+
+ def __repr__(self) -> str:
+ return "{0}({1!r})".format(self.__class__.__name__, self.name)
+
+ def __str__(self) -> str:
+ return "<{0} {1}>".format(self.__class__.__name__, self.name)
+
+ def _remove_from_registries(self, job_name: str, connection: ConnectionType) -> None:
+ """Removes the job from all registries besides failed_job_registry"""
+ self.finished_job_registry.delete(connection=connection, job_name=job_name)
+ self.scheduled_job_registry.delete(connection=connection, job_name=job_name)
+ self.started_job_registry.delete(connection=connection, job_name=job_name)
+ self.canceled_job_registry.delete(connection=connection, job_name=job_name)
+ self.queued_job_registry.delete(connection=connection, job_name=job_name)
+
+ def cancel_job(self, job_name: str) -> None:
+ """Cancels the given job, which will prevent the job from ever running (or inspected).
+
+ This method merely exists as a high-level API call to cancel jobs without worrying about the internals required
+ to implement job cancellation.
+
+ :param job_name: The job name to cancel.
+ :raises NoSuchJobError: If the job does not exist.
+ :raises InvalidJobOperation: If the job has already been canceled.
+ """
+ job = JobModel.get(job_name, connection=self.connection)
+ if job is None:
+ raise NoSuchJobError("No such job: {}".format(job_name))
+ if job.status == JobStatus.CANCELED:
+ raise InvalidJobOperation("Cannot cancel already canceled job: {}".format(job.name))
+
+ pipe = self.connection.pipeline()
+
+ while True:
+ try:
+ job.set_field("status", JobStatus.CANCELED, connection=pipe)
+ self._remove_from_registries(job_name, connection=pipe)
+ self.canceled_job_registry.add(pipe, job_name, 0)
+ pipe.execute()
+ break
+ except WatchError:
+ # if the pipeline comes from the caller, we re-raise the exception as it is the responsibility of the
+ # caller to handle it
+ raise
+
+ def delete_job(self, job_name: str):
+ """Deletes the given job from the queue and all its registries"""
+
+ pipe = self.connection.pipeline()
+
+ while True:
+ try:
+ self._remove_from_registries(job_name, connection=pipe)
+ self.failed_job_registry.delete(connection=pipe, job_name=job_name)
+ if JobModel.exists(job_name, connection=self.connection):
+ JobModel.delete_many([job_name], connection=pipe)
+ pipe.execute()
+ break
+ except WatchError:
+ pass
+
+ def requeue_jobs(self, *job_names: str, at_front: bool = False) -> int:
+ jobs = JobModel.get_many(job_names, connection=self.connection)
+ jobs_requeued = 0
+ with self.connection.pipeline() as pipe:
+ for job in jobs:
+ if job is None:
+ continue
+ job.started_at = None
+ job.ended_at = None
+ job.save(connection=pipe)
+ self._enqueue_job(job, connection=pipe, at_front=at_front)
+ jobs_requeued += 1
+ pipe.execute()
+ return jobs_requeued
diff --git a/scheduler/helpers/timeouts.py b/scheduler/helpers/timeouts.py
new file mode 100644
index 0000000..95920ba
--- /dev/null
+++ b/scheduler/helpers/timeouts.py
@@ -0,0 +1,111 @@
+import ctypes
+import signal
+import threading
+
+
+class BaseTimeoutException(Exception):
+ """Base exception for timeouts."""
+ pass
+
+
+class JobTimeoutException(BaseTimeoutException):
+ """Raised when a job takes longer to complete than the allowed maximum timeout value."""
+ pass
+
+
+class JobExecutionMonitorTimeoutException(BaseTimeoutException):
+ """Raised when waiting for a job-execution-process exiting takes longer than the maximum timeout value."""
+ pass
+
+
+class BaseDeathPenalty:
+ """Base class to setup job timeouts."""
+
+ def __init__(self, timeout, exception=BaseTimeoutException, **kwargs):
+ self._timeout = timeout
+ self._exception = exception
+
+ def __enter__(self):
+ self.setup_death_penalty()
+
+ def __exit__(self, type, value, traceback):
+ # Always cancel immediately, since we're done
+ try:
+ self.cancel_death_penalty()
+ except BaseTimeoutException:
+ # Weird case: we're done with the with body, but now the alarm is fired. We may safely ignore this
+ # situation and consider the body done.
+ pass
+
+        # __exit__ may return True to suppress further exception handling. We don't want to suppress any exceptions
+        # here, since all errors should just pass through, with BaseTimeoutException propagating normally to the
+        # invoking context.
+ return False
+
+ def setup_death_penalty(self):
+ raise NotImplementedError()
+
+ def cancel_death_penalty(self):
+ raise NotImplementedError()
+
+
+class UnixSignalDeathPenalty(BaseDeathPenalty):
+ def handle_death_penalty(self, signum, frame):
+ raise self._exception("Task exceeded maximum timeout value ({0} seconds)".format(self._timeout))
+
+ def setup_death_penalty(self):
+ """Sets up an alarm signal and a signal handler that raises an exception after the timeout amount (expressed
+ in seconds)."""
+ signal.signal(signal.SIGALRM, self.handle_death_penalty)
+ signal.alarm(self._timeout)
+
+ def cancel_death_penalty(self):
+ """Removes the death penalty alarm and puts back the system into default signal handling."""
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+
+
+class TimerDeathPenalty(BaseDeathPenalty):
+ def __init__(self, timeout, exception=JobTimeoutException, **kwargs):
+ super().__init__(timeout, exception, **kwargs)
+ self._target_thread_id = threading.current_thread().ident
+ self._timer = None
+
+ # Monkey-patch exception with the message ahead of time
+ # since PyThreadState_SetAsyncExc can only take a class
+ def init_with_message(self, *args, **kwargs): # noqa
+ super(exception, self).__init__("Task exceeded maximum timeout value ({0} seconds)".format(timeout))
+
+ self._exception.__init__ = init_with_message
+
+ def new_timer(self):
+ """Returns a new timer since timers can only be used once."""
+ return threading.Timer(self._timeout, self.handle_death_penalty)
+
+ def handle_death_penalty(self):
+ """Raises an asynchronous exception in another thread.
+
+ Reference http://docs.python.org/c-api/init.html#PyThreadState_SetAsyncExc for more info.
+ """
+ ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(
+ ctypes.c_long(self._target_thread_id), ctypes.py_object(self._exception)
+ )
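+        # PyThreadState_SetAsyncExc returns the number of threads affected: 0 means the thread id was not found,
+        # and a value greater than 1 means the call went wrong and the pending exception must be revoked.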
+ if ret == 0:
+ raise ValueError("Invalid thread ID {}".format(self._target_thread_id))
+ elif ret > 1:
+ ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self._target_thread_id), 0)
+ raise SystemError("PyThreadState_SetAsyncExc failed")
+
+ def setup_death_penalty(self):
+ """Starts the timer."""
+ if self._timeout <= 0:
+ return
+ self._timer = self.new_timer()
+ self._timer.start()
+
+ def cancel_death_penalty(self):
+ """Cancels the timer."""
+ if self._timeout <= 0:
+ return
+ self._timer.cancel()
+ self._timer = None
diff --git a/scheduler/tools.py b/scheduler/helpers/tools.py
similarity index 57%
rename from scheduler/tools.py
rename to scheduler/helpers/tools.py
index c73aae8..56bccd2 100644
--- a/scheduler/tools.py
+++ b/scheduler/helpers/tools.py
@@ -1,17 +1,17 @@
-import importlib
import os
-from typing import List, Any, Callable, Optional
+from typing import Any, Optional
import croniter
from django.apps import apps
from django.db import models
from django.utils import timezone
-from django.utils.module_loading import import_string
from django.utils.translation import gettext_lazy as _
-from scheduler.queues import get_queues, logger, get_queue
-from scheduler.rq_classes import DjangoWorker, JobExecution, TASK_TYPES, MODEL_NAMES
-from scheduler.settings import SCHEDULER_CONFIG, Broker
+from scheduler.broker_types import TASK_TYPES
+from scheduler.helpers.queues import get_queues
+from scheduler.redis_models import WorkerModel
+from scheduler.settings import SCHEDULER_CONFIG, Broker, logger
+from scheduler.worker.worker import Worker
class TaskType(models.TextChoices):
@@ -20,15 +20,6 @@ class TaskType(models.TextChoices):
ONCE = "OnceTaskType", _("Run once")
-def callable_func(callable_str: str) -> Callable:
- path = callable_str.split(".")
- module = importlib.import_module(".".join(path[:-1]))
- func = getattr(module, path[-1])
- if callable(func) is False:
- raise TypeError("'{}' is not callable".format(callable_str))
- return func
-
-
def get_next_cron_time(cron_string: Optional[str]) -> Optional[timezone.datetime]:
"""Calculate the next scheduled time by creating a crontab object with a cron string"""
if cron_string is None:
@@ -51,17 +42,13 @@ def get_scheduled_task(task_type_str: str, task_id: int) -> "BaseTask": # noqa:
return task
except ValueError:
raise ValueError(f"Invalid task type {task_type_str}")
- elif task_type_str in MODEL_NAMES:
- model = apps.get_model(app_label="scheduler", model_name=task_type_str)
- task = model.objects.filter(id=task_id).first()
- if task is None:
- raise ValueError(f"Job {task_type_str}:{task_id} does not exit")
- return task
raise ValueError(f"Job Model {task_type_str} does not exist, choices are {TASK_TYPES}")
def run_task(task_model: str, task_id: int) -> Any:
"""Run a scheduled job"""
+ if isinstance(task_id, str):
+ task_id = int(task_id)
scheduled_task = get_scheduled_task(task_model, task_id)
logger.debug(f"Running task {str(scheduled_task)}")
args = scheduled_task.parse_args()
@@ -80,32 +67,17 @@ def _calc_worker_name(existing_worker_names) -> str:
return worker_name
-def create_worker(*queue_names, **kwargs) -> DjangoWorker:
+def create_worker(*queue_names, **kwargs) -> Worker:
"""Returns a Django worker for all queues or specified ones."""
-
queues = get_queues(*queue_names)
- existing_workers = DjangoWorker.all(connection=queues[0].connection)
- existing_worker_names = set(map(lambda w: w.name, existing_workers))
+ existing_worker_names = WorkerModel.all_names(connection=queues[0].connection)
kwargs.setdefault("fork_job_execution", SCHEDULER_CONFIG.BROKER != Broker.FAKEREDIS)
if kwargs.get("name", None) is None:
kwargs["name"] = _calc_worker_name(existing_worker_names)
+ if kwargs["name"] in existing_worker_names:
+ raise ValueError(f"Worker {kwargs['name']} already exists")
kwargs["name"] = kwargs["name"].replace("/", ".")
- # Handle job_class if provided
- if "job_class" not in kwargs or kwargs["job_class"] is None:
- kwargs["job_class"] = "scheduler.rq_classes.JobExecution"
- try:
- kwargs["job_class"] = import_string(kwargs["job_class"])
- except ImportError:
- raise ImportError(f"Could not import job class {kwargs['job_class']}")
-
- worker = DjangoWorker(queues, connection=queues[0].connection, **kwargs)
+ worker = Worker(queues, connection=queues[0].connection, **kwargs)
return worker
-
-
-def get_job_executions_for_task(queue_name, scheduled_task) -> List[JobExecution]:
- queue = get_queue(queue_name)
- job_list = queue.get_all_jobs()
- res = list(filter(lambda j: j.is_execution_of(scheduled_task), job_list))
- return res
diff --git a/scheduler/helpers/utils.py b/scheduler/helpers/utils.py
new file mode 100644
index 0000000..f11ec16
--- /dev/null
+++ b/scheduler/helpers/utils.py
@@ -0,0 +1,23 @@
+import datetime
+import importlib
+import time
+from typing import Callable
+
+
+def current_timestamp() -> int:
+ """Returns current UTC timestamp in secs"""
+ return int(time.time())
+
+
+def utcnow():
+ """Return now in UTC"""
+ return datetime.datetime.now(datetime.UTC)
+
+
+def callable_func(callable_str: str) -> Callable:
+ path = callable_str.split(".")
+ module = importlib.import_module(".".join(path[:-1]))
+ func = getattr(module, path[-1])
+ if callable(func) is False:
+ raise TypeError("'{}' is not callable".format(callable_str))
+ return func
diff --git a/scheduler/management/commands/delete_failed_executions.py b/scheduler/management/commands/delete_failed_executions.py
index 01224e0..1922f6e 100644
--- a/scheduler/management/commands/delete_failed_executions.py
+++ b/scheduler/management/commands/delete_failed_executions.py
@@ -1,8 +1,8 @@
import click
from django.core.management.base import BaseCommand
-from scheduler.queues import get_queue
-from scheduler.rq_classes import JobExecution
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
class Command(BaseCommand):
@@ -15,15 +15,15 @@ def add_arguments(self, parser):
def handle(self, *args, **options):
queue = get_queue(options.get("queue", "default"))
- job_ids = queue.failed_job_registry.get_job_ids()
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
+ job_ids = queue.failed_job_registry.all()
+ jobs = JobModel.get_many(job_ids, connection=queue.connection)
func_name = options.get("func", None)
if func_name is not None:
jobs = [job for job in jobs if job.func_name == func_name]
dry_run = options.get("dry_run", False)
click.echo(f"Found {len(jobs)} failed jobs")
- for job in jobs:
- click.echo(f"Deleting {job.id}")
+        job_ids = [job.name for job in jobs]
+        for job in job_ids:
+            click.echo(f"Deleting {job}")
if not dry_run:
- job.delete()
+ queue.delete_job(job)
click.echo(f"Deleted {len(jobs)} failed jobs")
diff --git a/scheduler/management/commands/export.py b/scheduler/management/commands/export.py
index bb2b249..68fecab 100644
--- a/scheduler/management/commands/export.py
+++ b/scheduler/management/commands/export.py
@@ -1,10 +1,9 @@
import sys
import click
-from django.apps import apps
from django.core.management.base import BaseCommand
-from scheduler.tools import MODEL_NAMES
+from scheduler.models.task import Task
class Command(BaseCommand):
@@ -43,13 +42,12 @@ def add_arguments(self, parser):
def handle(self, *args, **options):
file = open(options.get("filename"), "w") if options.get("filename") else sys.stdout
res = list()
- for model_name in MODEL_NAMES:
- model = apps.get_model(app_label="scheduler", model_name=model_name)
- jobs = model.objects.all()
- if options.get("enabled"):
- jobs = jobs.filter(enabled=True)
- for job in jobs:
- res.append(job.to_dict())
+
+ tasks = Task.objects.all()
+ if options.get("enabled"):
+ tasks = tasks.filter(enabled=True)
+ for task in tasks:
+ res.append(task.to_dict())
if options.get("format") == "json":
import json
diff --git a/scheduler/management/commands/import.py b/scheduler/management/commands/import.py
index eca0661..7fe2940 100644
--- a/scheduler/management/commands/import.py
+++ b/scheduler/management/commands/import.py
@@ -2,7 +2,6 @@
from typing import Dict, Any, Optional
import click
-from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
@@ -10,7 +9,6 @@
from scheduler.models.task import TaskArg, TaskKwarg, Task
from scheduler.models.task import TaskType
-from scheduler.tools import MODEL_NAMES
def job_model_str(model_str: str) -> str:
@@ -54,7 +52,7 @@ def create_task_from_dict(task_dict: Dict[str, Any], update: bool) -> Optional[T
if not settings.USE_TZ and not timezone.is_naive(target):
target = timezone.make_naive(target)
kwargs["scheduled_time"] = target
- model_fields = filter(lambda field: hasattr(field, 'attname'), Task._meta.get_fields())
+ model_fields = filter(lambda field: hasattr(field, "attname"), Task._meta.get_fields())
model_fields = set(map(lambda field: field.attname, model_fields))
keys_to_ignore = list(filter(lambda _k: _k not in model_fields, kwargs.keys()))
for k in keys_to_ignore:
@@ -139,9 +137,7 @@ def handle(self, *args, **options):
jobs = yaml.load(file, yaml.SafeLoader)
if options.get("reset"):
- for model_name in MODEL_NAMES:
- model = apps.get_model(app_label="scheduler", model_name=model_name)
- model.objects.all().delete()
+ Task.objects.all().delete()
for job in jobs:
create_task_from_dict(job, update=options.get("update"))
diff --git a/scheduler/management/commands/rqstats.py b/scheduler/management/commands/rqstats.py
index 4ba54e9..f922144 100644
--- a/scheduler/management/commands/rqstats.py
+++ b/scheduler/management/commands/rqstats.py
@@ -9,7 +9,7 @@
ANSI_LIGHT_WHITE = "\033[1;37m"
ANSI_RESET = "\033[0m"
-KEYS = ("jobs", "started_jobs", "deferred_jobs", "finished_jobs", "canceled_jobs", "workers")
+KEYS = ("jobs", "started_jobs", "finished_jobs", "canceled_jobs", "workers")
class Command(BaseCommand):
@@ -60,7 +60,7 @@ def _print_stats_dashboard(self, statistics, prev_stats=None):
click.echo()
self._print_separator()
click.echo(
- f'| {"Name":<16} | Queued | Active | Deferred |' f" Finished |" f" Canceled |" f" Workers |"
+ f'| {"Name":<16} | Queued | Active |' f" Finished |" f" Canceled |" f" Workers |"
)
self._print_separator()
for ind, queue in enumerate(statistics["queues"]):
diff --git a/scheduler/management/commands/rqworker.py b/scheduler/management/commands/rqworker.py
index ce6201b..1503ca0 100644
--- a/scheduler/management/commands/rqworker.py
+++ b/scheduler/management/commands/rqworker.py
@@ -5,11 +5,10 @@
import click
from django.core.management.base import BaseCommand
from django.db import connections
-from rq.logutils import setup_loghandlers
from scheduler.broker_types import ConnectionErrorTypes
-from scheduler.rq_classes import register_sentry
-from scheduler.tools import create_worker
+from scheduler.settings import logger
+from scheduler.helpers.tools import create_worker
VERBOSITY_TO_LOG_LEVEL = {
0: logging.CRITICAL,
@@ -20,20 +19,14 @@
WORKER_ARGUMENTS = {
"name",
- "default_result_ttl",
"connection",
"exc_handler",
"exception_handlers",
- "default_worker_ttl",
"maintenance_interval",
- "job_class",
- "queue_class",
"log_job_description",
"job_monitoring_interval",
"disable_default_exception_handler",
"prepare_for_work",
- "serializer",
- "work_horse_killed_handler",
}
@@ -42,6 +35,17 @@ def reset_db_connections():
c.close()
+def register_sentry(sentry_dsn, **opts):
+ try:
+ import sentry_sdk
+ from sentry_sdk.integrations.rq import RqIntegration
+ except ImportError:
+ logger.error("Sentry SDK not installed. Skipping Sentry Integration")
+ return
+
+ sentry_sdk.init(sentry_dsn, integrations=[RqIntegration()], **opts)
+
+
class Command(BaseCommand):
"""
Runs RQ workers on specified queues. Note that all queues passed into a
@@ -85,7 +89,6 @@ def add_arguments(self, parser):
type=bool,
help="Fork job execution to another process",
)
- parser.add_argument("--job-class", action="store", dest="job_class", help="Jobs class to use")
parser.add_argument(
"queues",
nargs="*",
@@ -111,7 +114,7 @@ def handle(self, **options):
# Verbosity is defined by default in BaseCommand for all commands
verbosity = options.pop("verbosity", 1)
log_level = VERBOSITY_TO_LOG_LEVEL.get(verbosity, logging.INFO)
- setup_loghandlers(log_level)
+ logger.setLevel(log_level)
init_options = {k: v for k, v in options.items() if k in WORKER_ARGUMENTS}
@@ -129,7 +132,6 @@ def handle(self, **options):
w.work(
burst=options.get("burst", False),
- logging_level=log_level,
max_jobs=options["max_jobs"],
)
except ConnectionErrorTypes as e:
diff --git a/scheduler/management/commands/run_job.py b/scheduler/management/commands/run_job.py
index 48c7458..4f1e556 100644
--- a/scheduler/management/commands/run_job.py
+++ b/scheduler/management/commands/run_job.py
@@ -1,7 +1,7 @@
import click
from django.core.management.base import BaseCommand
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
class Command(BaseCommand):
@@ -34,4 +34,4 @@ def handle(self, **options):
args = options.get("args")
job = queue.enqueue_call(func, args=args, timeout=timeout, result_ttl=result_ttl)
if verbosity:
- click.echo(f"Job {job.id} created")
+ click.echo(f"Job {job.name} created")
diff --git a/scheduler/models/args.py b/scheduler/models/args.py
index f7cd57b..ac2d700 100644
--- a/scheduler/models/args.py
+++ b/scheduler/models/args.py
@@ -7,7 +7,7 @@
from django.db import models
from django.utils.translation import gettext_lazy as _
-from scheduler import tools
+from scheduler.helpers import utils
ARG_TYPE_TYPES_DICT = {
"str": str,
@@ -48,7 +48,7 @@ def clean(self):
)
try:
if self.arg_type == "callable":
- tools.callable_func(self.val)
+ utils.callable_func(self.val)
elif self.arg_type == "datetime":
datetime.fromisoformat(self.val)
elif self.arg_type == "bool":
@@ -71,7 +71,7 @@ def delete(self, **kwargs):
def value(self):
if self.arg_type == "callable":
- res = tools.callable_func(self.val)()
+ res = utils.callable_func(self.val)()
elif self.arg_type == "datetime":
res = datetime.fromisoformat(self.val)
elif self.arg_type == "bool":
diff --git a/scheduler/models/task.py b/scheduler/models/task.py
index fdd7253..4c61644 100644
--- a/scheduler/models/task.py
+++ b/scheduler/models/task.py
@@ -1,10 +1,9 @@
import math
import uuid
from datetime import timedelta, datetime
-from typing import Dict
+from typing import Dict, Any, Optional
import croniter
-from django.apps import apps
from django.conf import settings as django_settings
from django.contrib import admin
from django.contrib.contenttypes.fields import GenericRelation
@@ -18,24 +17,30 @@
from django.utils.translation import gettext_lazy as _
from scheduler import settings
-from scheduler import tools
+from scheduler.helpers import tools, utils
+from scheduler.broker_types import ConnectionType
+from scheduler.helpers.tools import TaskType
from scheduler.models.args import TaskArg, TaskKwarg
-from scheduler.queues import get_queue
-from scheduler.rq_classes import DjangoQueue
-from scheduler.settings import QUEUES
-from scheduler.settings import logger
-from scheduler.tools import TaskType
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import Callback
+from scheduler.redis_models import JobModel
+from scheduler.helpers.queues import Queue
+from scheduler.settings import logger, get_queue_names
SCHEDULER_INTERVAL = settings.SCHEDULER_CONFIG.SCHEDULER_INTERVAL
-def failure_callback(job, connection, result, *args, **kwargs):
- task_type = job.meta.get("task_type", None)
- if task_type is None:
- return
- task = Task.objects.filter(job_id=job.id).first()
+def _get_task_for_job(job: JobModel) -> Optional["Task"]:
+ if job.task_type is None or job.scheduled_task_id is None:
+ return None
+ task = Task.objects.filter(id=job.scheduled_task_id).first()
+ return task
+
+
+def failure_callback(job: JobModel, connection, result, *args, **kwargs):
+ task = _get_task_for_job(job)
if task is None:
- logger.warn(f"Could not find task for job {job.id}")
+ logger.warn(f"Could not find task for job {job.name}")
return
mail_admins(
f"Task {task.id}/{task.name} has failed",
@@ -47,17 +52,10 @@ def failure_callback(job, connection, result, *args, **kwargs):
task.save(schedule_job=True)
-def success_callback(job, connection, result, *args, **kwargs):
- task_type = job.meta.get("task_type", None)
- if task_type is None:
- return
-
- task = Task.objects.filter(job_id=job.id).first()
- if task is None:
- model = apps.get_model(app_label="scheduler", model_name=task_type)
- task = model.objects.filter(job_id=job.id).first()
+def success_callback(job: JobModel, connection: ConnectionType, result: Any, *args, **kwargs):
+ task = _get_task_for_job(job)
if task is None:
- logger.warn(f"Could not find task for job {job.id}")
+ logger.warn(f"Could not find task for job {job.name}")
return
task.job_id = None
task.successful_runs += 1
@@ -66,7 +64,8 @@ def success_callback(job, connection, result, *args, **kwargs):
def get_queue_choices():
- return [(queue, queue) for queue in QUEUES.keys()]
+ queue_names = get_queue_names()
+ return [(queue, queue) for queue in queue_names]
class Task(models.Model):
@@ -177,7 +176,7 @@ class TimeUnits(models.TextChoices):
def callable_func(self):
"""Translate callable string to callable"""
- return tools.callable_func(self.callable)
+ return utils.callable_func(self.callable)
@admin.display(boolean=True, description=_("is scheduled?"))
def is_scheduled(self) -> bool:
@@ -185,10 +184,9 @@ def is_scheduled(self) -> bool:
if self.job_id is None: # no job_id => is not scheduled
return False
# check whether job_id is in scheduled/queued/active jobs
- scheduled_jobs = self.rqueue.scheduled_job_registry.get_job_ids()
- enqueued_jobs = self.rqueue.get_job_ids()
- active_jobs = self.rqueue.started_job_registry.get_job_ids()
- res = (self.job_id in scheduled_jobs) or (self.job_id in enqueued_jobs) or (self.job_id in active_jobs)
+ res = ((self.job_id in self.rqueue.scheduled_job_registry.all())
+ or (self.job_id in self.rqueue.queued_job_registry.all())
+ or (self.job_id in self.rqueue.started_job_registry.all()))
# If the job_id is not scheduled/queued/started,
# update the job_id to None. (The job_id belongs to a previous run which is completed)
if not res:
@@ -229,18 +227,17 @@ def _enqueue_args(self) -> Dict:
- set job meta
"""
res = dict(
- meta=dict(
- task_type=self.task_type,
- scheduled_task_id=self.id,
- ),
- on_success=success_callback,
- on_failure=failure_callback,
- job_id=self._next_job_id(),
+ meta=dict(),
+ task_type=self.task_type,
+ scheduled_task_id=self.id,
+ on_success=Callback(success_callback),
+ on_failure=Callback(failure_callback),
+ name=self._next_job_id(),
)
if self.at_front:
res["at_front"] = self.at_front
if self.timeout:
- res["job_timeout"] = self.timeout
+ res["timeout"] = self.timeout
if self.result_ttl is not None:
res["result_ttl"] = self.result_ttl
if self.task_type == TaskType.REPEATABLE:
@@ -249,19 +246,19 @@ def _enqueue_args(self) -> Dict:
return res
@property
- def rqueue(self) -> DjangoQueue:
+ def rqueue(self) -> Queue:
"""Returns django-queue for job"""
return get_queue(self.queue)
def enqueue_to_run(self) -> bool:
"""Enqueue task to run now."""
kwargs = self._enqueue_args()
- job = self.rqueue.enqueue(
+ job = self.rqueue.enqueue_call(
tools.run_task,
args=(self.task_type, self.id),
**kwargs,
)
- self.job_id = job.id
+ self.job_id = job.name
self.save(schedule_job=False)
return True
@@ -270,12 +267,9 @@ def unschedule(self) -> bool:
If a job is queued to be executed or scheduled to be executed, it will remove it.
"""
- queue = self.rqueue
- if self.job_id is None:
- return True
- queue.remove(self.job_id)
- queue.scheduled_job_registry.remove(self.job_id)
- self.job_id = None
+ if self.job_id is not None:
+ self.rqueue.delete_job(self.job_id)
+ self.job_id = None
self.save(schedule_job=False)
return True
@@ -365,7 +359,7 @@ def _schedule(self) -> bool:
args=(self.task_type, self.id),
**kwargs,
)
- self.job_id = job.id
+ self.job_id = job.name
super(Task, self).save()
return True
@@ -391,19 +385,19 @@ def interval_seconds(self):
def clean_callable(self):
try:
- tools.callable_func(self.callable)
+ utils.callable_func(self.callable)
except Exception:
raise ValidationError(
{"callable": ValidationError(_("Invalid callable, must be importable"), code="invalid")}
)
def clean_queue(self):
- queue_keys = settings.QUEUES.keys()
- if self.queue not in queue_keys:
+ queue_names = settings.get_queue_names()
+ if self.queue not in queue_names:
raise ValidationError(
{
"queue": ValidationError(
- _("Invalid queue, must be one of: {}".format(", ".join(queue_keys))), code="invalid"
+ _("Invalid queue, must be one of: {}".format(", ".join(queue_names))), code="invalid"
)
}
)
diff --git a/scheduler/queues.py b/scheduler/queues.py
deleted file mode 100644
index f7796db..0000000
--- a/scheduler/queues.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from typing import List, Dict, Set
-
-from .broker_types import ConnectionErrorTypes, BrokerMetaData
-from .rq_classes import JobExecution, DjangoQueue, DjangoWorker
-from .settings import SCHEDULER_CONFIG
-from .settings import logger, Broker
-
-_CONNECTION_PARAMS = {
- "URL",
- "DB",
- "USE_REDIS_CACHE",
- "UNIX_SOCKET_PATH",
- "HOST",
- "PORT",
- "PASSWORD",
- "SENTINELS",
- "MASTER_NAME",
- "SOCKET_TIMEOUT",
- "SSL",
- "CONNECTION_KWARGS",
-}
-
-
-class QueueNotFoundError(Exception):
- pass
-
-
-def _get_broker_connection(config, use_strict_broker=False):
- """
- Returns a redis connection from a connection config
- """
- if SCHEDULER_CONFIG.BROKER == Broker.FAKEREDIS:
- import fakeredis
-
- broker_cls = fakeredis.FakeRedis if not use_strict_broker else fakeredis.FakeStrictRedis
- else:
- broker_cls = BrokerMetaData[(SCHEDULER_CONFIG.BROKER, use_strict_broker)].connection_type
- logger.debug(f"Getting connection for {config}")
- if "URL" in config:
- ssl_url_protocol = BrokerMetaData[(SCHEDULER_CONFIG.BROKER, use_strict_broker)].ssl_prefix
- if config.get("SSL") or config.get("URL").startswith(f"{ssl_url_protocol}://"):
- return broker_cls.from_url(
- config["URL"],
- db=config.get("DB"),
- ssl_cert_reqs=config.get("SSL_CERT_REQS", "required"),
- )
- else:
- return broker_cls.from_url(
- config["URL"],
- db=config.get("DB"),
- )
- if "UNIX_SOCKET_PATH" in config:
- return broker_cls(unix_socket_path=config["UNIX_SOCKET_PATH"], db=config["DB"])
-
- if "SENTINELS" in config:
- connection_kwargs = {
- "db": config.get("DB"),
- "password": config.get("PASSWORD"),
- "username": config.get("USERNAME"),
- "socket_timeout": config.get("SOCKET_TIMEOUT"),
- }
- connection_kwargs.update(config.get("CONNECTION_KWARGS", {}))
- sentinel_kwargs = config.get("SENTINEL_KWARGS", {})
- SentinelClass = BrokerMetaData[(SCHEDULER_CONFIG.BROKER, use_strict_broker)].sentinel_type
- sentinel = SentinelClass(config["SENTINELS"], sentinel_kwargs=sentinel_kwargs, **connection_kwargs)
- return sentinel.master_for(
- service_name=config["MASTER_NAME"],
- redis_class=broker_cls,
- )
-
- return broker_cls(
- host=config["HOST"],
- port=config["PORT"],
- db=config.get("DB", 0),
- username=config.get("USERNAME", None),
- password=config.get("PASSWORD"),
- ssl=config.get("SSL", False),
- ssl_cert_reqs=config.get("SSL_CERT_REQS", "required"),
- **config.get("CLIENT_KWARGS", {}),
- )
-
-
-def get_connection(queue_settings, use_strict_redis=False):
- """Returns a Broker connection to use based on parameters in SCHEDULER_QUEUES"""
- return _get_broker_connection(queue_settings, use_strict_redis)
-
-
-def get_queue(
- name="default", default_timeout=None, is_async=None, autocommit=None, connection=None, **kwargs
-) -> DjangoQueue:
- """Returns an DjangoQueue using parameters defined in `SCHEDULER_QUEUES`"""
- from .settings import QUEUES
-
- if name not in QUEUES:
- raise QueueNotFoundError(f"Queue {name} not found, queues={QUEUES.keys()}")
- queue_settings = QUEUES[name]
- if is_async is None:
- is_async = queue_settings.get("ASYNC", True)
-
- if default_timeout is None:
- default_timeout = queue_settings.get("DEFAULT_TIMEOUT")
- if connection is None:
- connection = get_connection(queue_settings)
- return DjangoQueue(
- name, default_timeout=default_timeout, connection=connection, is_async=is_async, autocommit=autocommit, **kwargs
- )
-
-
-def get_all_workers() -> Set[DjangoWorker]:
- from .settings import QUEUES
-
- workers_set: Set[DjangoWorker] = set()
- for queue_name in QUEUES:
- connection = get_connection(QUEUES[queue_name])
- try:
- curr_workers: Set[DjangoWorker] = set(DjangoWorker.all(connection=connection))
- workers_set.update(curr_workers)
- except ConnectionErrorTypes as e:
- logger.error(f"Could not connect for queue {queue_name}: {e}")
- return workers_set
-
-
-def _queues_share_connection_params(q1_params: Dict, q2_params: Dict):
- """Check that both queues share the same connection parameters"""
- return all(
- ((p not in q1_params and p not in q2_params) or (q1_params.get(p, None) == q2_params.get(p, None)))
- for p in _CONNECTION_PARAMS
- )
-
-
-def get_queues(*queue_names, **kwargs) -> List[DjangoQueue]:
- """Return queue instances from specified queue names.
- All instances must use the same Redis connection.
- """
- from .settings import QUEUES
-
- kwargs["job_class"] = JobExecution
- queue_params = QUEUES[queue_names[0]]
- queues = [get_queue(queue_names[0], **kwargs)]
- # perform consistency checks while building return list
- for name in queue_names[1:]:
- if not _queues_share_connection_params(queue_params, QUEUES[name]):
- raise ValueError(
- f'Queues must have the same broker connection. "{name}" and'
- f' "{queue_names[0]}" have different connections'
- )
- queue = get_queue(name, **kwargs)
- queues.append(queue)
-
- return queues
diff --git a/scheduler/redis_models/__init__.py b/scheduler/redis_models/__init__.py
new file mode 100644
index 0000000..14876b9
--- /dev/null
+++ b/scheduler/redis_models/__init__.py
@@ -0,0 +1,20 @@
+__all__ = [
+ "Result",
+ "ResultType",
+ "as_str",
+ "Callback",
+ "SchedulerLock",
+ "WorkerModel",
+ "DequeueTimeout",
+ "KvLock",
+ "JobStatus",
+ "JobModel",
+]
+
+from .base import as_str
+from scheduler.helpers.callback import Callback
+from .job import JobStatus, JobModel
+from .lock import SchedulerLock, KvLock
+from .registry.base_registry import DequeueTimeout
+from .result import Result, ResultType
+from .worker import WorkerModel
diff --git a/scheduler/redis_models/base.py b/scheduler/redis_models/base.py
new file mode 100644
index 0000000..095f6e8
--- /dev/null
+++ b/scheduler/redis_models/base.py
@@ -0,0 +1,221 @@
+import dataclasses
+import json
+from collections.abc import Sequence
+from datetime import datetime, UTC
+from enum import Enum
+from typing import List, Optional, Union, Self, Dict, Collection, Any, ClassVar, Set, Type
+
+from redis import Redis
+
+from scheduler.broker_types import ConnectionType
+from scheduler.settings import logger
+
+MAX_KEYS = 1000
+
+
+def as_str(v: Union[bytes, str]) -> Optional[str]:
+ """Converts a `bytes` value to a string using `utf-8`.
+
+ :param v: The value (None/bytes/str)
+ :raises: ValueError: If the value is not `bytes` or `str`
+ :returns: Either the decoded string or None
+ """
+ if v is None or isinstance(v, str):
+ return v
+ if isinstance(v, bytes):
+ return v.decode("utf-8")
+ raise ValueError("Unknown type %r" % type(v))
+
+
+def decode_dict(d: Dict[bytes, bytes], exclude_keys: Set[str]) -> Dict[str, str]:
+ return {
+ k.decode(): v.decode()
+ for (k, v) in d.items()
+ if k.decode() not in exclude_keys
+ }
+
+
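+# Convert a Python value into the string form stored in the broker hash (bools, enums, datetimes, dicts, sequences).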
+def _serialize(value: Any) -> Optional[str]:
+ if value is None:
+ return None
+ if isinstance(value, bool):
+ value = int(value)
+ elif isinstance(value, Enum):
+ value = value.value
+ elif isinstance(value, datetime):
+ value = value.isoformat()
+ elif isinstance(value, dict):
+ value = json.dumps(value)
+ elif isinstance(value, (int, float)):
+ return str(value)
+ elif isinstance(value, (list, set, tuple)):
+ return json.dumps(value)
+ return str(value)
+
+
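+# Convert a stored string back into a Python value based on the dataclass field's annotated type.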
+def _deserialize(value: str, _type: Type) -> Any:
+ if value is None:
+ return None
+ if _type is str or _type == Optional[str]:
+ return as_str(value)
+ if _type is datetime or _type == Optional[datetime]:
+ return datetime.fromisoformat(as_str(value))
+ elif _type is bool:
+ return bool(int(value))
+ elif _type is int or _type == Optional[int]:
+ return int(value)
+ elif _type is float or _type == Optional[float]:
+ return float(value)
+ elif _type in {List[Any], List[str], Dict[str, str]}:
+ return json.loads(value)
+ elif issubclass(_type, Enum):
+ return _type(as_str(value))
+ return value
+
+
+@dataclasses.dataclass(slots=True, kw_only=True)
+class BaseModel:
+ name: str
+ _element_key_template: ClassVar[str] = ":element:{}"
+
+ @classmethod
+ def key_for(cls, name: str) -> str:
+ return cls._element_key_template.format(name)
+
+ @property
+ def _key(self) -> str:
+ return self._element_key_template.format(self.name)
+
+ def serialize(self) -> Dict[str, str]:
+ data = dataclasses.asdict(self, dict_factory=lambda fields: {
+ key: value
+ for (key, value) in fields
+ if value is not None
+ })
+ for k in data:
+ data[k] = _serialize(data[k])
+ return data
+
+ @classmethod
+ def deserialize(cls, data: Dict[str, Any]) -> Self:
+ types = {f.name: f.type for f in dataclasses.fields(cls)}
+ for k in data:
+ if k not in types:
+ logger.warning(f"Unknown field {k} in WorkerModel")
+ continue
+ data[k] = _deserialize(data[k], types[k])
+ return cls(**data)
+
+
+@dataclasses.dataclass(slots=True, kw_only=True)
+class HashModel(BaseModel):
+ created_at: Optional[datetime] = None
+ parent: Optional[str] = None
+ _list_key: ClassVar[str] = ":list_all:"
+ _children_key_template: ClassVar[str] = ":children:{}:"
+ _byte_keys: ClassVar[Set[str]] = set()
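+    # _list_key holds the names of all elements of this model; _children_key_template groups element names under a parent.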
+
+ @property
+ def _parent_key(self) -> Optional[str]:
+ if self.parent is None:
+ return None
+ return self._children_key_template.format(self.parent)
+
+ @classmethod
+ def all_names(cls, connection: Redis, parent: Optional[str] = None) -> Collection[str]:
+ collection_key = cls._children_key_template.format(parent) if parent else cls._list_key
+ collection_members = connection.smembers(collection_key)
+ return [r.decode() for r in collection_members]
+
+ @classmethod
+ def all(cls, connection: Redis, parent: Optional[str] = None) -> List[Self]:
+ keys = cls.all_names(connection, parent)
+ items = [cls.get(k, connection) for k in keys]
+ return [w for w in items if w is not None]
+
+ @classmethod
+ def exists(cls, name: str, connection: ConnectionType) -> bool:
+ return connection.exists(cls._element_key_template.format(name)) > 0
+
+ @classmethod
+ def delete_many(cls, names: List[str], connection: ConnectionType) -> None:
+ for name in names:
+ connection.delete(cls._element_key_template.format(name))
+
+ @classmethod
+ def get(cls, name: str, connection: ConnectionType) -> Optional[Self]:
+ res = connection.hgetall(cls._element_key_template.format(name))
+ if not res:
+ return None
+ return cls.deserialize(decode_dict(res, cls._byte_keys))
+
+ @classmethod
+ def get_many(cls, names: Sequence[str], connection: ConnectionType) -> List[Self]:
+ pipeline = connection.pipeline()
+ for name in names:
+ pipeline.hgetall(cls._element_key_template.format(name))
+ values = pipeline.execute()
+ return [
+ (cls.deserialize(decode_dict(v, cls._byte_keys)) if v else None)
+ for v in values
+ ]
+
+ def save(self, connection: ConnectionType) -> None:
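+        # Add the element name to the global (and parent, if any) name sets, then store the fields as a hash.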
+ connection.sadd(self._list_key, self.name)
+ if self._parent_key is not None:
+ connection.sadd(self._parent_key, self.name)
+ mapping = self.serialize()
+ connection.hset(self._key, mapping=mapping)
+
+ def delete(self, connection: ConnectionType) -> None:
+        connection.srem(self._list_key, self.name)
+        if self._parent_key is not None:
+            connection.srem(self._parent_key, self.name)
+ connection.delete(self._key)
+
+ @classmethod
+ def count(cls, connection: ConnectionType, parent: Optional[str] = None) -> int:
+ if parent is not None:
+ result = connection.scard(cls._children_key_template.format(parent))
+ else:
+ result = connection.scard(cls._list_key)
+ return result
+
+ def get_field(self, field: str, connection: ConnectionType) -> Any:
+ types = {f.name: f.type for f in dataclasses.fields(self)}
+ res = connection.hget(self._key, field)
+ return _deserialize(res, types[field])
+
+ def set_field(self, field: str, value: Any, connection: ConnectionType, set_attribute: bool = True) -> None:
+ if not hasattr(self, field):
+ raise AttributeError(f"Field {field} does not exist")
+ if set_attribute:
+ setattr(self, field, value)
+ if value is None:
+ connection.hdel(self._key, field)
+ return
+ value = _serialize(value)
+ connection.hset(self._key, field, value)
+
+
+@dataclasses.dataclass(slots=True, kw_only=True)
+class StreamModel(BaseModel):
+    # created_at and parent are declared as fields so the slots exist for the custom __init__ below.
+    created_at: Optional[datetime] = None
+    parent: Optional[str] = None
+    _children_key_template: ClassVar[str] = ":children:{}:"
+
+    def __init__(self, name: str, parent: str, created_at: Optional[datetime] = None):
+        self.name = name
+        self.created_at = created_at or datetime.now(UTC)
+        self.parent = parent
+
+ @property
+ def _parent_key(self) -> str:
+ return self._children_key_template.format(self.parent)
+
+ @classmethod
+ def all(cls, connection: ConnectionType, parent: str) -> List[Self]:
+ results = connection.xrevrange(cls._children_key_template.format(parent), "+", "-")
+ return [cls(**decode_dict(result[1], exclude_keys=set())) for result in results]
+
+ def save(self, connection: ConnectionType) -> bool:
+ result = connection.xadd(self._parent_key, self.serialize(), maxlen=10)
+ return bool(result)
diff --git a/scheduler/redis_models/job.py b/scheduler/redis_models/job.py
new file mode 100644
index 0000000..1177e2e
--- /dev/null
+++ b/scheduler/redis_models/job.py
@@ -0,0 +1,321 @@
+import dataclasses
+import inspect
+import numbers
+from datetime import datetime
+from enum import Enum
+from typing import ClassVar, Dict, Optional, List, Callable, Any, Union, Tuple, Self, Iterable
+from uuid import uuid4
+
+from scheduler.helpers import utils
+from scheduler.broker_types import ConnectionType, FunctionReferenceType
+from scheduler.redis_models import Callback
+from scheduler.redis_models.base import HashModel, as_str
+from scheduler.redis_models.registry.base_registry import JobNamesRegistry
+from scheduler.settings import SCHEDULER_CONFIG
+
+
+class TimeoutFormatError(Exception):
+ pass
+
+
+def get_call_string(
+ func_name: Optional[str], args: Any, kwargs: Dict[Any, Any], max_length: Optional[int] = None
+) -> Optional[str]:
+ """
+ Returns a string representation of the call, formatted as a regular
+ Python function invocation statement. If max_length is not None, truncate
+ arguments with representation longer than max_length.
+
+ Args:
+        func_name (str): The function name
+ args (Any): The function arguments
+ kwargs (Dict[Any, Any]): The function kwargs
+ max_length (int, optional): The max length. Defaults to None.
+
+ Returns:
+ str: A String representation of the function call.
+ """
+ if func_name is None:
+ return None
+
+ arg_list = [as_str(_truncate_long_string(repr(arg), max_length)) for arg in args]
+
+ list_kwargs = ["{0}={1}".format(k, as_str(_truncate_long_string(repr(v), max_length))) for k, v in kwargs.items()]
+ arg_list += sorted(list_kwargs)
+ args = ", ".join(arg_list)
+
+ return "{0}({1})".format(func_name, args)
+
+
+class JobStatus(str, Enum):
+ """The Status of Job within its lifecycle at any given time."""
+
+ QUEUED = "queued"
+ FINISHED = "finished"
+ FAILED = "failed"
+ STARTED = "started"
+ SCHEDULED = "scheduled"
+ STOPPED = "stopped"
+ CANCELED = "canceled"
+
+
+@dataclasses.dataclass(slots=True, kw_only=True)
+class JobModel(HashModel):
+ _list_key: ClassVar[str] = ":jobs:"
+ _children_key_template: ClassVar[str] = ":{}:jobs:"
+ _element_key_template: ClassVar[str] = ":jobs:{}"
+
+ queue_name: str
+ description: str
+ func_name: str
+
+ args: List[Any]
+ kwargs: Dict[str, str]
+ timeout: int = SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT
+ result_ttl: int = SCHEDULER_CONFIG.DEFAULT_RESULT_TTL
+ ttl: int = SCHEDULER_CONFIG.DEFAULT_WORKER_TTL
+ status: JobStatus
+ created_at: datetime
+ meta: Dict[str, str]
+ at_front: bool = False
+ retries_left: Optional[int] = None
+ retry_intervals: Optional[List[int]] = None
+ last_heartbeat: Optional[datetime] = None
+ worker_name: Optional[str] = None
+ started_at: Optional[datetime] = None
+ enqueued_at: Optional[datetime] = None
+ ended_at: Optional[datetime] = None
+ success_callback_name: Optional[str] = None
+ success_callback_timeout: int = SCHEDULER_CONFIG.CALLBACK_TIMEOUT
+ failure_callback_name: Optional[str] = None
+ failure_callback_timeout: int = SCHEDULER_CONFIG.CALLBACK_TIMEOUT
+ stopped_callback_name: Optional[str] = None
+ stopped_callback_timeout: int = SCHEDULER_CONFIG.CALLBACK_TIMEOUT
+ task_type: Optional[str] = None
+ scheduled_task_id: Optional[int] = None
+
+ def serialize(self) -> Dict[str, str]:
+ res = super(JobModel, self).serialize()
+ return res
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __eq__(self, other): # noqa
+ return isinstance(other, self.__class__) and self.name == other.name
+
+ def __str__(self):
+ return f"{self.name}: {self.description}"
+
+ def get_status(self, connection: ConnectionType) -> JobStatus:
+ broker_value = self.get_field("status", connection=connection)
+ return JobStatus(broker_value)
+
+ def set_status(self, status: JobStatus, connection: ConnectionType) -> None:
+ """Set's the Job Status"""
+ self.set_field("status", status, connection=connection)
+
+ def is_execution_of(self, task: "Task") -> bool:
+ return self.scheduled_task_id == task.id and self.task_type == task.task_type
+
+ @property
+ def is_queued(self) -> bool:
+ return self.status == JobStatus.QUEUED
+
+ @property
+ def is_canceled(self) -> bool:
+ return self.status == JobStatus.CANCELED
+
+ @property
+ def is_failed(self) -> bool:
+ return self.status == JobStatus.FAILED
+
+ @property
+ def func(self) -> Callable[[Any], Any]:
+ return utils.callable_func(self.func_name)
+
+ @property
+ def is_scheduled_task(self) -> bool:
+ return self.scheduled_task_id is not None
+
+ def expire(self, ttl: int, connection: ConnectionType) -> None:
+ """Expire the Job Model if ttl >= 0"""
+ if ttl == 0:
+ self.delete(connection=connection)
+ elif ttl > 0:
+ connection.expire(self._key, ttl)
+
+ def persist(self, connection: ConnectionType) -> None:
+ connection.persist(self._key)
+
+ def prepare_for_execution(self, worker_name: str, registry: JobNamesRegistry, connection: ConnectionType) -> None:
+ """Prepares the job for execution, setting the worker name,
+ heartbeat information, status and other metadata before execution begins.
+ :param worker_name: The name of the worker
+ :param registry: The registry to add the job to
+ :param connection: The connection to the broker
+ """
+ self.worker_name = worker_name
+ self.last_heartbeat = utils.utcnow()
+ self.started_at = self.last_heartbeat
+ self.status = JobStatus.STARTED
+ registry.add(connection, self.name, self.last_heartbeat.timestamp())
+ self.save(connection=connection)
+
+ @property
+ def failure_callback(self) -> Optional[Callback]:
+ if self.failure_callback_name is None:
+ return None
+ return Callback(self.failure_callback_name, self.failure_callback_timeout)
+
+ @property
+    def success_callback(self) -> Optional[Callback]:
+ if self.success_callback_name is None:
+ return None
+ return Callback(self.success_callback_name, self.success_callback_timeout)
+
+ @property
+    def stopped_callback(self) -> Optional[Callback]:
+ if self.stopped_callback_name is None:
+ return None
+ return Callback(self.stopped_callback_name, self.stopped_callback_timeout)
+
+    def get_call_string(self) -> Optional[str]:
+        return get_call_string(self.func_name, self.args, self.kwargs)
+
+ @classmethod
+ def create(
+ cls,
+ connection: ConnectionType,
+ func: FunctionReferenceType,
+ queue_name: str,
+        args: Optional[Union[List[Any], Tuple]] = None,
+ kwargs: Optional[Dict[str, Any]] = None,
+ result_ttl: Optional[int] = None,
+ ttl: Optional[int] = None,
+ status: Optional[JobStatus] = None,
+ description: Optional[str] = None,
+ timeout: Optional[int] = None,
+ name: Optional[str] = None,
+ task_type: Optional[str] = None,
+ scheduled_task_id: Optional[int] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ *,
+ on_success: Optional[Callback] = None,
+ on_failure: Optional[Callback] = None,
+ on_stopped: Optional[Callback] = None,
+ at_front: Optional[bool] = None,
+ retries_left: Optional[int] = None,
+ retry_intervals: Union[int, List[int], None] = None,
+ ) -> Self:
+ """Creates a new job-model for the given function, arguments, and keyword arguments.
+ :returns: A job-model instance.
+ """
+ args = args or []
+ kwargs = kwargs or {}
+        timeout = _parse_timeout(timeout)
+        if timeout == 0:
+            raise ValueError("0 timeout is not allowed. Use -1 for infinite timeout")
+        timeout = timeout or SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT
+ ttl = _parse_timeout(ttl or SCHEDULER_CONFIG.DEFAULT_RESULT_TTL)
+ if ttl is not None and ttl <= 0:
+ raise ValueError("Job ttl must be greater than 0")
+ result_ttl = _parse_timeout(result_ttl)
+ if not isinstance(args, (tuple, list)):
+ raise TypeError("{0!r} is not a valid args list".format(args))
+ if not isinstance(kwargs, dict):
+ raise TypeError("{0!r} is not a valid kwargs dict".format(kwargs))
+ if on_success and not isinstance(on_success, Callback):
+ raise ValueError("on_success must be a Callback object")
+ if on_failure and not isinstance(on_failure, Callback):
+ raise ValueError("on_failure must be a Callback object")
+ if on_stopped and not isinstance(on_stopped, Callback):
+ raise ValueError("on_stopped must be a Callback object")
+ name = name or str(uuid4())
+
+ if inspect.ismethod(func):
+            _func_name = func.__name__
+        elif inspect.isfunction(func) or inspect.isbuiltin(func):
+ _func_name = "{0}.{1}".format(func.__module__, func.__qualname__)
+ elif isinstance(func, str):
+ _func_name = as_str(func)
+ elif not inspect.isclass(func) and hasattr(func, "__call__"): # a callable class instance
+ _func_name = "__call__"
+ else:
+ raise TypeError("Expected a callable or a string, but got: {0}".format(func))
+        description = description or get_call_string(_func_name, args, kwargs, max_length=75)
+
+        if retries_left is not None and retries_left < 1:
+            raise ValueError("retries_left: please enter a value greater than 0")
+        if isinstance(retry_intervals, int):
+            if retry_intervals < 0:
+                raise ValueError("interval: negative numbers are not allowed")
+            retry_intervals = [retry_intervals]
+        elif isinstance(retry_intervals, Iterable):
+            retry_intervals = list(retry_intervals)
+            for interval in retry_intervals:
+                if interval < 0:
+                    raise ValueError("interval: negative numbers are not allowed")
+
+ model = JobModel(
+ created_at=utils.utcnow(),
+ name=name,
+ queue_name=queue_name,
+ description=description,
+ func_name=_func_name,
+ args=args or [],
+ kwargs=kwargs or {},
+ at_front=at_front,
+ task_type=task_type,
+ scheduled_task_id=scheduled_task_id,
+ success_callback_name=on_success.name if on_success else None,
+ success_callback_timeout=on_success.timeout if on_success else None,
+ failure_callback_name=on_failure.name if on_failure else None,
+ failure_callback_timeout=on_failure.timeout if on_failure else None,
+ stopped_callback_name=on_stopped.name if on_stopped else None,
+ stopped_callback_timeout=on_stopped.timeout if on_stopped else None,
+ result_ttl=result_ttl,
+ ttl=ttl or SCHEDULER_CONFIG.DEFAULT_RESULT_TTL,
+ timeout=timeout,
+ status=status,
+ last_heartbeat=None,
+ meta=meta or {},
+ retry_intervals=retry_intervals,
+ retries_left=retries_left,
+ worker_name=None,
+ enqueued_at=None,
+ started_at=None,
+ ended_at=None,
+ )
+ model.save(connection=connection)
+ return model
+
+
+def _truncate_long_string(data: str, max_length: Optional[int] = None) -> str:
+ """Truncate arguments with representation longer than max_length"""
+ if max_length is None:
+ return data
+ return (data[:max_length] + "...") if len(data) > max_length else data
+
+
+def _parse_timeout(timeout: Optional[Union[int, float, str]]) -> Optional[int]:
+    """Converts any supported timeout format (int, float, or a string such as "1h" or "23m") to seconds"""
+ if not isinstance(timeout, numbers.Integral) and timeout is not None:
+ try:
+ timeout = int(timeout)
+ except ValueError:
+ digit, unit = timeout[:-1], (timeout[-1:]).lower()
+ unit_second = {"d": 86400, "h": 3600, "m": 60, "s": 1}
+ try:
+ timeout = int(digit) * unit_second[unit]
+ except (ValueError, KeyError):
+ raise TimeoutFormatError(
+ "Timeout must be an integer or a string representing an integer, or "
+ 'a string with format: digits + unit, unit can be "d", "h", "m", "s", '
+ 'such as "1h", "23m".'
+ )
+
+ return timeout
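+
+
+# Illustrative sketch of the helpers above (comment only, not exercised by the module;
+# "tasks.send_email" is a made-up function name):
+#
+#   _parse_timeout("1h")   -> 3600
+#   _parse_timeout("23m")  -> 1380
+#   _parse_timeout(None)   -> None (callers then fall back to SCHEDULER_CONFIG defaults)
+#   get_call_string("tasks.send_email", ["a@b.c"], {"retries": 3})
+#       -> "tasks.send_email('a@b.c', retries=3)"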
diff --git a/scheduler/redis_models/lock.py b/scheduler/redis_models/lock.py
new file mode 100644
index 0000000..e5deeb5
--- /dev/null
+++ b/scheduler/redis_models/lock.py
@@ -0,0 +1,31 @@
+from typing import Optional, Any
+
+from scheduler.broker_types import ConnectionType
+
+
+class KvLock:
+ def __init__(self, name: str) -> None:
+ self.name = name
+ self.acquired = False
+
+ @property
+ def _locking_key(self) -> str:
+ return f"_lock:{self.name}"
+
+ def acquire(self, val: Any, connection: ConnectionType, expire: Optional[int] = None) -> bool:
+ self.acquired = connection.set(self._locking_key, val, nx=True, ex=expire)
+ return self.acquired
+
+ def expire(self, connection: ConnectionType, expire: Optional[int] = None) -> bool:
+ return connection.expire(self._locking_key, expire)
+
+ def release(self, connection: ConnectionType):
+ connection.delete(self._locking_key)
+
+ def value(self, connection: ConnectionType) -> Any:
+ return connection.get(self._locking_key)
+
+
+class SchedulerLock(KvLock):
+ def __init__(self, queue_name: str) -> None:
+ super().__init__(f"lock:scheduler:{queue_name}")
diff --git a/scheduler/redis_models/registry/__init__.py b/scheduler/redis_models/registry/__init__.py
new file mode 100644
index 0000000..0f05b1b
--- /dev/null
+++ b/scheduler/redis_models/registry/__init__.py
@@ -0,0 +1,16 @@
+__all__ = [
+ "JobNamesRegistry",
+ "FinishedJobRegistry",
+ "StartedJobRegistry",
+ "FailedJobRegistry",
+ "CanceledJobRegistry",
+ "ScheduledJobRegistry",
+ "QueuedJobRegistry",
+ "NoSuchJobError",
+]
+
+from .base_registry import JobNamesRegistry
+from .queue_registries import (
+    FinishedJobRegistry,
+    StartedJobRegistry,
+    FailedJobRegistry,
+    CanceledJobRegistry,
+    ScheduledJobRegistry,
+    QueuedJobRegistry,
+    NoSuchJobError,
+)
diff --git a/scheduler/redis_models/registry/base_registry.py b/scheduler/redis_models/registry/base_registry.py
new file mode 100644
index 0000000..b71d77b
--- /dev/null
+++ b/scheduler/redis_models/registry/base_registry.py
@@ -0,0 +1,121 @@
+import dataclasses
+from collections.abc import Sequence
+from typing import ClassVar, Optional, List, Self, Tuple, Any
+
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models.base import as_str, BaseModel
+from scheduler.settings import logger
+from scheduler.helpers.utils import current_timestamp
+
+
+class DequeueTimeout(Exception):
+ pass
+
+
+@dataclasses.dataclass(slots=True, kw_only=True)
+class ZSetModel(BaseModel):
+
+ def cleanup(self, connection: ConnectionType, timestamp: Optional[float] = None) -> None:
+ """Remove expired jobs from registry."""
+ score = timestamp or current_timestamp()
+ connection.zremrangebyscore(self._key, 0, score)
+
+ def count(self, connection: ConnectionType) -> int:
+ """Returns the number of jobs in this registry"""
+ self.cleanup(connection=connection)
+ return connection.zcard(self._key)
+
+ def add(
+ self,
+ connection: ConnectionType,
+ member: str,
+ score: float,
+ update_existing_only: bool = False) -> int:
+ return connection.zadd(self._key, {member: float(score)}, xx=update_existing_only)
+
+ def delete(self, connection: ConnectionType, job_name: str) -> None:
+ connection.zrem(self._key, job_name)
+
+
+class JobNamesRegistry(ZSetModel):
+ _element_key_template: ClassVar[str] = ":registry:{}"
+
+ def __init__(self, connection: ConnectionType, name: str) -> None:
+ super().__init__(name=name)
+ self.connection = connection
+
+ def __len__(self) -> int:
+ return self.count(self.connection)
+
+ def all(self, start: int = 0, end: int = -1) -> List[str]:
+ """Returns list of all job ids.
+
+ :param start: Start score/timestamp, default to 0.
+ :param end: End score/timestamp, default to -1 (i.e., no max score).
+ :returns: Returns list of all job ids with timestamp from start to end
+ """
+ self.cleanup(self.connection)
+ res = [as_str(job_id) for job_id in self.connection.zrange(self._key, start, end)]
+ logger.debug(f"Getting jobs for registry {self._key}: {len(res)} found.")
+ return res
+
+    def all_with_timestamps(self, start: int = 0, end: int = -1) -> List[Tuple[str, float]]:
+ """Returns list of all job ids with their timestamps.
+
+ :param start: Start score/timestamp, default to 0.
+ :param end: End score/timestamp, default to -1 (i.e., no max score).
+ :returns: Returns list of all job ids with timestamp from start to end
+ """
+ self.cleanup(self.connection)
+ res = self.connection.zrange(self._key, start, end, withscores=True)
+ logger.debug(f"Getting jobs for registry {self._key}: {len(res)} found.")
+ return [(as_str(job_id), timestamp) for job_id, timestamp in res]
+
+ def get_first(self) -> Optional[str]:
+ """Returns the first job in the registry."""
+ self.cleanup(self.connection)
+ first_job = self.connection.zrange(self._key, 0, 0)
+        return as_str(first_job[0]) if first_job else None
+
+ def get_last_timestamp(self) -> Optional[float]:
+ """Returns the last timestamp in the registry."""
+ self.cleanup(self.connection)
+ last_timestamp = self.connection.zrange(self._key, -1, -1, withscores=True)
+ return last_timestamp[0][1] if last_timestamp else None
+
+ @property
+ def key(self) -> str:
+ return self._key
+
+ @classmethod
+ def pop(
+ cls, connection: ConnectionType, registries: Sequence[Self], timeout: Optional[int]
+    ) -> Tuple[Optional[str], Optional[str]]:
+ """Helper method to abstract away from some Redis API details
+
+ :param connection: Broker connection
+ :param registries: List of registries to pop from
+ :param timeout: Timeout in seconds
+ :raises ValueError: If timeout of 0 was passed
+        :raises DequeueTimeout: BZPOPMIN timed out and no job was available
+ :returns: Tuple of registry key and job id
+ """
+ if timeout == 0:
+ raise ValueError("Indefinite timeout not supported. Please pick a timeout value > 0")
+        registry_keys = [r.key for r in registries]
+        if timeout is not None:  # blocking variant
+            registries_str = ", ".join(map(str, registry_keys))
+            logger.debug(f"Starting BZPOPMIN operation for registries {registries_str} with timeout of {timeout}")
+            result = connection.bzpopmin(registry_keys, timeout)
+            if not result:
+                logger.debug(f"BZPOPMIN timeout, no jobs found in registries {registries_str}")
+                raise DequeueTimeout(timeout, registry_keys)
+            registry_key, job_id, _timestamp = result  # BZPOPMIN returns a (key, member, score) tuple
+            return registry_key, job_id
+ else: # non-blocking variant
+ for registry_key in registry_keys:
+ results: List[Any] = connection.zpopmin(registry_key)
+ if results:
+ job_id, timestamp = results[0]
+ return registry_key, job_id
+ return None, None
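+
+
+# Illustrative sketch of how `pop` is expected to be used (comment only; the registry
+# class and queue name are assumptions):
+#
+#     registries = [QueuedJobRegistry(connection, name="default")]
+#     # Blocking: waits up to 5 seconds, raises DequeueTimeout if no job arrives
+#     registry_key, job_name = JobNamesRegistry.pop(connection, registries, timeout=5)
+#     # Non-blocking: returns (None, None) immediately when all registries are empty
+#     registry_key, job_name = JobNamesRegistry.pop(connection, registries, timeout=None)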
diff --git a/scheduler/redis_models/registry/queue_registries.py b/scheduler/redis_models/registry/queue_registries.py
new file mode 100644
index 0000000..611c9ce
--- /dev/null
+++ b/scheduler/redis_models/registry/queue_registries.py
@@ -0,0 +1,116 @@
+import time
+from datetime import datetime, timedelta, timezone
+from typing import ClassVar, Optional, List, Tuple
+
+from scheduler.broker_types import ConnectionType
+from scheduler.helpers.utils import current_timestamp
+from .base_registry import JobNamesRegistry
+from .. import as_str
+from ..job import JobModel
+
+
+class NoSuchJobError(Exception):
+ pass
+
+
+class QueuedJobRegistry(JobNamesRegistry):
+ _element_key_template: ClassVar[str] = ":registry:{}:queued_jobs"
+
+ def cleanup(self, connection: ConnectionType, timestamp: Optional[float] = None) -> None:
+ """This method is only here to prevent errors because this method is automatically called by `count()`
+ and `get_job_ids()` methods implemented in JobIdsRegistry."""
+ pass
+
+    def compact(self):
+        """Removes all "dead" entries (whose JobModel no longer exists) from the registry,
+        while preserving FIFO semantics."""
+        compact_queue_name = f"{self._key}:compact"
+
+        jobs_with_ts = self.all_with_timestamps()
+        self.connection.rename(self._key, compact_queue_name)
+        for job_name, timestamp in jobs_with_ts:
+            if job_name is None:
+                continue
+            if JobModel.exists(job_name, self.connection):
+                # Re-add only entries that still have a backing job; dead entries stay behind
+                self.add(self.connection, job_name, timestamp)
+        self.connection.delete(compact_queue_name)
+
+
+class FinishedJobRegistry(JobNamesRegistry):
+ _element_key_template: ClassVar[str] = ":registry:{}:finished_jobs"
+
+
+class FailedJobRegistry(JobNamesRegistry):
+ _element_key_template: ClassVar[str] = ":registry:{}:failed_jobs"
+
+
+class CanceledJobRegistry(JobNamesRegistry):
+ _element_key_template: ClassVar[str] = ":registry:{}:canceled_jobs"
+
+ def cleanup(self, connection: ConnectionType, timestamp: Optional[float] = None) -> None:
+ """This method is only here to prevent errors because this method is automatically called by `count()`
+ and `get_job_ids()` methods implemented in JobIdsRegistry."""
+ pass
+
+
+class ScheduledJobRegistry(JobNamesRegistry):
+ _element_key_template: ClassVar[str] = ":registry:{}:scheduled_jobs"
+
+ def cleanup(self, connection: ConnectionType, timestamp: Optional[float] = None) -> None:
+ """This method is only here to prevent errors because this method is automatically called by `count()`
+ and `get_job_ids()` methods implemented in JobIdsRegistry."""
+ pass
+
+ def schedule(self, connection: ConnectionType, job: JobModel, scheduled_datetime):
+ """
+ Adds job to registry, scored by its execution time (in UTC).
+ If datetime has no tzinfo, it will assume localtimezone.
+ """
+ # If datetime has no timezone, assume server's local timezone
+ if not scheduled_datetime.tzinfo:
+ tz = timezone(timedelta(seconds=-(time.timezone if time.daylight == 0 else time.altzone)))
+ scheduled_datetime = scheduled_datetime.replace(tzinfo=tz)
+
+ timestamp = scheduled_datetime.timestamp()
+ return connection.zadd(self._key, {job.name: timestamp})
+
+ def get_jobs_to_schedule(self, timestamp: int, chunk_size: int = 1000) -> List[str]:
+ """Gets a list of job IDs that should be scheduled.
+
+ :param timestamp: timestamp/score of jobs in SortedSet.
+ :param chunk_size: Max results to return.
+ :returns: A list of Job ids
+ """
+ score = timestamp
+ jobs_to_schedule = self.connection.zrangebyscore(self._key, 0, score, start=0, num=chunk_size)
+ return [as_str(job_id) for job_id in jobs_to_schedule]
+
+ def get_scheduled_time(self, job_name: str) -> datetime:
+ """Returns datetime (UTC) at which job is scheduled to be enqueued
+
+ :param job_name: Job name
+ :raises NoSuchJobError: If the job was not found
+ :returns: The scheduled time as datetime object
+ """
+
+ score = self.connection.zscore(self._key, job_name)
+        if score is None:
+ raise NoSuchJobError
+
+ return datetime.fromtimestamp(score, tz=timezone.utc)
+
+
+class StartedJobRegistry(JobNamesRegistry):
+ """Registry of currently executing jobs. Each queue maintains a StartedJobRegistry."""
+ _element_key_template: ClassVar[str] = ":registry:{}:started_jobs"
+
+ def get_job_names_before(self, connection: ConnectionType, timestamp: Optional[float]) -> List[Tuple[str, float]]:
+ """Returns job names whose score is lower than a timestamp timestamp.
+
+ Returns names for jobs with an expiry time earlier than timestamp,
+ specified as seconds since the Unix epoch.
+ timestamp defaults to calltime if unspecified.
+ """
+ score = timestamp or current_timestamp()
+ jobs_before = connection.zrangebyscore(self._key, 0, score, withscores=True)
+ return [(as_str(job_name), score) for (job_name, score) in jobs_before]
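+
+
+# Illustrative sketch (comment only; assumes a live `connection` and an existing JobModel `job`):
+#
+#     registry = ScheduledJobRegistry(connection, name="default")
+#     registry.schedule(connection, job, datetime(2030, 1, 1, 12, 0))  # naive -> local timezone assumed
+#     registry.get_scheduled_time(job.name)  # -> timezone-aware datetime in UTC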
diff --git a/scheduler/redis_models/result.py b/scheduler/redis_models/result.py
new file mode 100644
index 0000000..bb65366
--- /dev/null
+++ b/scheduler/redis_models/result.py
@@ -0,0 +1,70 @@
+import dataclasses
+from datetime import datetime
+from enum import Enum
+from typing import Optional, Any, Self, ClassVar, List
+
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models.base import StreamModel, decode_dict
+from scheduler.helpers.utils import utcnow
+
+
+class ResultType(Enum):
+ SUCCESSFUL = 1
+ FAILED = 2
+ STOPPED = 3
+
+
+@dataclasses.dataclass(slots=True, kw_only=True)
+class Result(StreamModel):
+ parent: str
+ type: ResultType
+ ttl: Optional[int] = 0
+ name: Optional[str] = None
+ created_at: datetime = dataclasses.field(default_factory=utcnow)
+ return_value: Optional[Any] = None
+ exc_string: Optional[str] = None
+
+ _list_key: ClassVar[str] = ":job-results:"
+ _children_key_template: ClassVar[str] = ":job-results:{}:"
+ _element_key_template: ClassVar[str] = ":job-results:{}"
+
+ @classmethod
+ def create(
+ cls,
+ connection: ConnectionType,
+ job_name: str,
+ _type: ResultType,
+ ttl: int,
+ return_value: Any = None,
+ exc_string: Optional[str] = None,
+ ) -> Self:
+ result = cls(parent=job_name, ttl=ttl, type=_type, return_value=return_value, exc_string=exc_string)
+ result.save(connection)
+ return result
+
+ @classmethod
+ def fetch_latest(cls, connection: ConnectionType, job_name: str) -> Optional["Result"]:
+ """Returns the latest result for given job_id.
+
+ :param connection: Broker connection.
+ :param job_name: Job ID.
+ :return: Result instance or None if no result is available.
+ """
+ response: List[Any] = connection.xrevrange(cls._children_key_template.format(job_name), "+", "-", count=1)
+ if not response:
+ return None
+ result_id, payload = response[0]
+ res = cls(**decode_dict(payload, set()))
+ return res
+
+ def __repr__(self):
+ return f"Result(name={self.name}, type={self.type.name})"
+
+ def __eq__(self, other: Self) -> bool:
+ try:
+ return self.name == other.name
+ except AttributeError:
+ return False
+
+ def __bool__(self) -> bool:
+ return bool(self.name)
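+
+
+# Illustrative round trip (comment only; assumes a live `connection`, the job name is made up):
+#
+#     Result.create(connection, job_name="job-42", _type=ResultType.SUCCESSFUL, ttl=600, return_value=4)
+#     latest = Result.fetch_latest(connection, "job-42")
+#     latest.type  # -> ResultType.SUCCESSFUL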
diff --git a/scheduler/redis_models/worker.py b/scheduler/redis_models/worker.py
new file mode 100644
index 0000000..fbd0fae
--- /dev/null
+++ b/scheduler/redis_models/worker.py
@@ -0,0 +1,122 @@
+import dataclasses
+from datetime import datetime
+from enum import Enum
+from typing import List, Optional, Self, ClassVar, Any, Generator
+
+from scheduler.broker_types import ConnectionType
+from scheduler.helpers.utils import utcnow
+from scheduler.redis_models.base import HashModel, MAX_KEYS
+from scheduler.settings import logger
+
+DEFAULT_WORKER_TTL = 420
+
+
+class WorkerStatus(str, Enum):
+ STARTING = "starting"
+ STARTED = "started"
+ SUSPENDED = "suspended"
+ BUSY = "busy"
+ IDLE = "idle"
+
+
+@dataclasses.dataclass(slots=True, kw_only=True)
+class WorkerModel(HashModel):
+ name: str
+ queue_names: List[str]
+ birth: datetime = dataclasses.field(default_factory=utcnow)
+ last_heartbeat: datetime
+ pid: int
+ hostname: str
+ ip_address: str
+ version: str
+ python_version: str
+ state: WorkerStatus
+ job_execution_process_pid: int = 0
+ successful_job_count: int = 0
+ failed_job_count: int = 0
+ completed_jobs: int = 0
+ is_suspended: bool = False
+ current_job_name: Optional[str] = None
+ stopped_job_name: Optional[str] = None
+ total_working_time: float = 0
+ current_job_working_time: float = 0
+ last_cleaned_at: Optional[datetime] = None
+ shutdown_requested_date: Optional[datetime] = None
+ scheduler_pid: Optional[int] = None
+ death: Optional[datetime] = None
+
+ _list_key: ClassVar[str] = ":workers:"
+ _children_key_template: ClassVar[str] = ":{}:workers:"
+ _element_key_template: ClassVar[str] = ":workers:{}"
+
+ def save(self, connection: ConnectionType) -> None:
+ pipeline = connection.pipeline()
+ super(WorkerModel, self).save(pipeline)
+ for queue_name in self.queue_names:
+ pipeline.sadd(self._children_key_template.format(queue_name), self.name)
+ pipeline.expire(self._key, DEFAULT_WORKER_TTL + 60)
+ pipeline.execute()
+
+ def delete(self, connection: ConnectionType) -> None:
+ logger.debug(f"Deleting worker {self.name}")
+ pipeline = connection.pipeline()
+ now = utcnow()
+ for queue_name in self.queue_names:
+ pipeline.srem(self._children_key_template.format(queue_name), self.name)
+ self.death = now
+ pipeline.hset(self._key, "death", now.isoformat())
+ pipeline.expire(self._key, 60)
+        pipeline.srem(self._list_key, self.name)
+ pipeline.execute()
+
+ def __eq__(self, other: Self) -> bool:
+ if not isinstance(other, self.__class__):
+ raise TypeError("Cannot compare workers to other types (of workers)")
+ return self._key == other._key
+
+ def __hash__(self):
+ """The hash does not take the database/connection into account"""
+ return hash((self._key, ",".join(self.queue_names)))
+
+ def __str__(self):
+ return f"{self._key}/{','.join(self.queue_names)}"
+
+ def set_current_job_working_time(self, job_execution_time: int, connection: ConnectionType) -> None:
+ self.set_field("current_job_working_time", job_execution_time, connection=connection)
+
+ def heartbeat(self, connection: ConnectionType, timeout: Optional[int] = None) -> None:
+ timeout = timeout or DEFAULT_WORKER_TTL + 60
+ connection.expire(self._key, timeout)
+ now = utcnow()
+ self.set_field("last_heartbeat", now, connection=connection)
+ logger.debug(f"Next heartbeat for worker {self._key} should arrive in {timeout} seconds.")
+
+ @classmethod
+ def cleanup(cls, connection: ConnectionType, queue_name: Optional[str] = None):
+ worker_keys = cls.all_names(connection, queue_name)
+ with connection.pipeline() as pipeline:
+ for worker_key in worker_keys:
+ pipeline.exists(worker_key)
+ worker_exist = pipeline.execute()
+ invalid_workers = list()
+ for i, worker_key in enumerate(worker_keys):
+ if not worker_exist[i]:
+ invalid_workers.append(worker_key)
+ if len(invalid_workers) == 0:
+ return
+ for invalid_subset in _split_list(invalid_workers, MAX_KEYS):
+ pipeline.srem(cls._list_key, *invalid_subset)
+ if queue_name:
+ pipeline.srem(cls._children_key_template.format(queue_name), *invalid_subset)
+ pipeline.execute()
+
+
+def _split_list(a_list: List[str], segment_size: int) -> Generator[List[str], Any, None]:
+ """Splits a list into multiple smaller lists having size `segment_size`
+
+ :param a_list: The list to split
+ :param segment_size: The segment size to split into
+ :returns: The list split into smaller lists
+ """
+ for i in range(0, len(a_list), segment_size):
+ yield a_list[i: i + segment_size]
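+
+
+# Illustrative sketch (comment only): `cleanup()` chunks stale worker names with `_split_list`
+# so each SREM call gets at most MAX_KEYS members.
+#
+#     list(_split_list(["w1", "w2", "w3", "w4", "w5"], segment_size=2))
+#     # -> [["w1", "w2"], ["w3", "w4"], ["w5"]]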
diff --git a/scheduler/rq_classes.py b/scheduler/rq_classes.py
deleted file mode 100644
index a88bb00..0000000
--- a/scheduler/rq_classes.py
+++ /dev/null
@@ -1,280 +0,0 @@
-from typing import List, Optional, Union
-
-import django
-from django.apps import apps
-from rq import Worker
-from rq.command import send_stop_job_command
-from rq.decorators import job
-from rq.exceptions import InvalidJobOperation
-from rq.job import Job, JobStatus
-from rq.job import get_current_job # noqa
-from rq.queue import Queue, logger
-from rq.registry import (
- DeferredJobRegistry,
- FailedJobRegistry,
- FinishedJobRegistry,
- ScheduledJobRegistry,
- StartedJobRegistry,
- CanceledJobRegistry,
- BaseRegistry,
-)
-from rq.scheduler import RQScheduler
-from rq.worker import WorkerStatus
-
-from scheduler import settings
-from scheduler.broker_types import PipelineType, ConnectionType
-
-MODEL_NAMES = ["Task"]
-TASK_TYPES = ["OnceTaskType", "RepeatableTaskType", "CronTaskType"]
-
-rq_job_decorator = job
-ExecutionStatus = JobStatus
-InvalidJobOperation = InvalidJobOperation
-
-
-def register_sentry(sentry_dsn, **opts):
- from rq.contrib.sentry import register_sentry as rq_register_sentry
-
- rq_register_sentry(sentry_dsn, **opts)
-
-
-def as_str(v: Union[bytes, str]) -> Optional[str]:
- """Converts a `bytes` value to a string using `utf-8`.
-
- :param v: The value (None/bytes/str)
- :raises: ValueError: If the value is not `bytes` or `str`
- :returns: Either the decoded string or None
- """
- if v is None:
- return None
- if isinstance(v, bytes):
- return v.decode("utf-8")
- if isinstance(v, str):
- return v
- raise ValueError("Unknown type %r" % type(v))
-
-
-class JobExecution(Job):
- def __eq__(self, other) -> bool:
- return isinstance(other, Job) and self.id == other.id
-
- @property
- def is_scheduled_task(self) -> bool:
- return self.meta.get("scheduled_task_id", None) is not None
-
- def is_execution_of(self, task: "Task") -> bool: # noqa: F821
- return (
- self.meta.get("task_type", None) == task.task_type and self.meta.get("scheduled_task_id",
- None) == task.id
- )
-
- def stop_execution(self, connection: ConnectionType):
- send_stop_job_command(connection, self.id)
-
-
-class DjangoWorker(Worker):
- def __init__(self, *args, **kwargs):
- self.fork_job_execution = kwargs.pop("fork_job_execution", True)
- job_class = kwargs.get("job_class") or JobExecution
- if not isinstance(job_class, type) or not issubclass(job_class, JobExecution):
- raise ValueError("job_class must be a subclass of JobExecution")
-
- # Update kwargs with the potentially modified job_class
- kwargs["job_class"] = job_class
- kwargs["queue_class"] = DjangoQueue
- super(DjangoWorker, self).__init__(*args, **kwargs)
-
- def __eq__(self, other):
- return isinstance(other, Worker) and self.key == other.key and self.name == other.name
-
- def __hash__(self):
- return hash((self.name, self.key, ",".join(self.queue_names())))
-
- def __str__(self):
- return f"{self.name}/{','.join(self.queue_names())}"
-
- def _start_scheduler(
- self,
- burst: bool = False,
- logging_level: str = "INFO",
- date_format: str = "%H:%M:%S",
- log_format: str = "%(asctime)s %(message)s",
- ) -> None:
- """Starts the scheduler process.
- This is specifically designed to be run by the worker when running the `work()` method.
- Instantiates the DjangoScheduler and tries to acquire a lock.
- If the lock is acquired, start scheduler.
- If the worker is on burst mode, just enqueues scheduled jobs and quits,
- otherwise, starts the scheduler in a separate process.
-
- :param burst (bool, optional): Whether to work on burst mode. Defaults to False.
- :param logging_level (str, optional): Logging level to use. Defaults to "INFO".
- :param date_format (str, optional): Date Format. Defaults to DEFAULT_LOGGING_DATE_FORMAT.
- :param log_format (str, optional): Log Format. Defaults to DEFAULT_LOGGING_FORMAT.
- """
- self.scheduler = DjangoScheduler(
- self.queues,
- connection=self.connection,
- logging_level=logging_level,
- date_format=date_format,
- log_format=log_format,
- serializer=self.serializer,
- )
- self.scheduler.acquire_locks()
- if self.scheduler.acquired_locks:
- if burst:
- self.scheduler.enqueue_scheduled_jobs()
- self.scheduler.release_locks()
- else:
- proc = self.scheduler.start()
- self._set_property("scheduler_pid", proc.pid)
-
- def execute_job(self, job: "Job", queue: "Queue") -> None:
- if self.fork_job_execution:
- super(DjangoWorker, self).execute_job(job, queue)
- else:
- self.set_state(WorkerStatus.BUSY)
- self.perform_job(job, queue)
- self.set_state(WorkerStatus.IDLE)
-
- def work(self, **kwargs) -> bool:
- kwargs.setdefault("with_scheduler", True)
- return super(DjangoWorker, self).work(**kwargs)
-
- def _set_property(self, prop_name: str, val, pipeline: Optional[PipelineType] = None) -> None:
- connection = pipeline if pipeline is not None else self.connection
- if val is None:
- connection.hdel(self.key, prop_name)
- else:
- connection.hset(self.key, prop_name, val)
-
- def _get_property(self, prop_name: str, pipeline: Optional[PipelineType] = None) -> Optional[str]:
- connection = pipeline if pipeline is not None else self.connection
- res = connection.hget(self.key, prop_name)
- return as_str(res)
-
- def scheduler_pid(self) -> Optional[int]:
- if len(self.queues) == 0:
- logger.warning("No queues to get scheduler pid from")
- return None
- pid = self.connection.get(DjangoScheduler.get_locking_key(self.queues[0].name))
- return int(pid.decode()) if pid is not None else None
-
-
-class DjangoQueue(Queue):
- """A subclass of RQ's QUEUE that allows jobs to be stored temporarily to be enqueued later at the end of Django's
- request/response cycle."""
-
- REGISTRIES = dict(
- finished="finished_job_registry",
- failed="failed_job_registry",
- scheduled="scheduled_job_registry",
- started="started_job_registry",
- deferred="deferred_job_registry",
- canceled="canceled_job_registry",
- )
-
- def __init__(self, *args, **kwargs) -> None:
- kwargs["job_class"] = JobExecution
- super(DjangoQueue, self).__init__(*args, **kwargs)
-
- def get_registry(self, name: str) -> Union[None, BaseRegistry, "DjangoQueue"]:
- name = name.lower()
- if name == "queued":
- return self
- elif name in DjangoQueue.REGISTRIES:
- return getattr(self, DjangoQueue.REGISTRIES[name])
- return None
-
- @property
- def finished_job_registry(self) -> FinishedJobRegistry:
- return FinishedJobRegistry(self.name, self.connection)
-
- @property
- def started_job_registry(self) -> StartedJobRegistry:
- return StartedJobRegistry(
- self.name,
- self.connection,
- job_class=JobExecution,
- )
-
- @property
- def deferred_job_registry(self) -> DeferredJobRegistry:
- return DeferredJobRegistry(
- self.name,
- self.connection,
- job_class=JobExecution,
- )
-
- @property
- def failed_job_registry(self) -> FailedJobRegistry:
- return FailedJobRegistry(
- self.name,
- self.connection,
- job_class=JobExecution,
- )
-
- @property
- def scheduled_job_registry(self) -> ScheduledJobRegistry:
- return ScheduledJobRegistry(
- self.name,
- self.connection,
- job_class=JobExecution,
- )
-
- @property
- def canceled_job_registry(self) -> CanceledJobRegistry:
- return CanceledJobRegistry(
- self.name,
- self.connection,
- job_class=JobExecution,
- )
-
- def get_all_job_ids(self) -> List[str]:
- res = list()
- res.extend(self.get_job_ids())
- res.extend(self.finished_job_registry.get_job_ids())
- res.extend(self.started_job_registry.get_job_ids())
- res.extend(self.deferred_job_registry.get_job_ids())
- res.extend(self.failed_job_registry.get_job_ids())
- res.extend(self.scheduled_job_registry.get_job_ids())
- res.extend(self.canceled_job_registry.get_job_ids())
- return res
-
- def get_all_jobs(self) -> List[JobExecution]:
- job_ids = self.get_all_job_ids()
- return list(filter(lambda j: j is not None, [self.fetch_job(job_id) for job_id in job_ids]))
-
- def clean_registries(self) -> None:
- self.started_job_registry.cleanup()
- self.failed_job_registry.cleanup()
- self.finished_job_registry.cleanup()
-
- def remove_job_id(self, job_id: str) -> None:
- self.connection.lrem(self.key, 0, job_id)
-
- def last_job_id(self) -> Optional[str]:
- return self.connection.lindex(self.key, 0)
-
-
-class DjangoScheduler(RQScheduler):
- def __init__(self, *args, **kwargs) -> None:
- kwargs.setdefault("interval", settings.SCHEDULER_CONFIG.SCHEDULER_INTERVAL)
- super(DjangoScheduler, self).__init__(*args, **kwargs)
-
- @staticmethod
- def reschedule_all_jobs():
- for model_name in MODEL_NAMES:
- model = apps.get_model(app_label="scheduler", model_name=model_name)
- enabled_jobs = model.objects.filter(enabled=True)
- for item in enabled_jobs:
- logger.debug(f"Rescheduling {str(item)}")
- item.save()
-
- def work(self) -> None:
- django.setup()
- super(DjangoScheduler, self).work()
-
- def enqueue_scheduled_jobs(self) -> None:
- self.reschedule_all_jobs()
- super(DjangoScheduler, self).enqueue_scheduled_jobs()
diff --git a/scheduler/settings.py b/scheduler/settings.py
index db770be..78752e0 100644
--- a/scheduler/settings.py
+++ b/scheduler/settings.py
@@ -1,57 +1,34 @@
import logging
-from dataclasses import dataclass
-from enum import Enum
-from typing import Callable
+from typing import List, Dict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
-logger = logging.getLogger(__package__)
+from scheduler._config_types import SchedulerConfig, Broker, QueueConfiguration
-QUEUES = dict()
+logger = logging.getLogger("scheduler")
+_QUEUES: Dict[str, QueueConfiguration] = dict()
+SCHEDULER_CONFIG: SchedulerConfig = SchedulerConfig()
-class Broker(Enum):
- REDIS = "redis"
- FAKEREDIS = "fakeredis"
- VALKEY = "valkey"
-
-@dataclass
-class SchedulerConfig:
- EXECUTIONS_IN_PAGE: int
- DEFAULT_RESULT_TTL: int
- DEFAULT_TIMEOUT: int
- SCHEDULER_INTERVAL: int
- BROKER: Broker
- TOKEN_VALIDATION_METHOD: Callable[[str], bool]
-
-
-def _token_validation(token: str) -> bool:
- return False
-
-
-SCHEDULER_CONFIG: SchedulerConfig = SchedulerConfig(
- EXECUTIONS_IN_PAGE=20,
- DEFAULT_RESULT_TTL=600,
- DEFAULT_TIMEOUT=300,
- SCHEDULER_INTERVAL=10,
- BROKER=Broker.REDIS,
- TOKEN_VALIDATION_METHOD=_token_validation,
-)
+class QueueNotFoundError(Exception):
+ pass
def conf_settings():
- global QUEUES
+ global _QUEUES
global SCHEDULER_CONFIG
-
- QUEUES = getattr(settings, "SCHEDULER_QUEUES", None)
- if QUEUES is None:
+ app_queues = getattr(settings, "SCHEDULER_QUEUES", None)
+ if app_queues is None:
logger.warning("Configuration using RQ_QUEUES is deprecated. Use SCHEDULER_QUEUES instead")
- QUEUES = getattr(settings, "RQ_QUEUES", None)
- if QUEUES is None:
+ app_queues = getattr(settings, "RQ_QUEUES", None)
+ if app_queues is None:
raise ImproperlyConfigured("You have to define SCHEDULER_QUEUES in settings.py")
+ for queue_name, queue_config in app_queues.items():
+ _QUEUES[queue_name] = QueueConfiguration(**queue_config)
+
user_settings = getattr(settings, "SCHEDULER_CONFIG", {})
if "FAKEREDIS" in user_settings:
logger.warning("Configuration using FAKEREDIS is deprecated. Use BROKER='fakeredis' instead")
@@ -64,3 +41,13 @@ def conf_settings():
conf_settings()
+
+
+def get_queue_names() -> List[str]:
+ return list(_QUEUES.keys())
+
+
+def get_queue_configuration(queue_name: str) -> QueueConfiguration:
+ if queue_name not in _QUEUES:
+ raise QueueNotFoundError(f"Queue {queue_name} not found, queues={_QUEUES.keys()}")
+ return _QUEUES[queue_name]
diff --git a/scheduler/templates/admin/scheduler/confirm_action.html b/scheduler/templates/admin/scheduler/confirm_action.html
index c61b8bf..69dd45c 100644
--- a/scheduler/templates/admin/scheduler/confirm_action.html
+++ b/scheduler/templates/admin/scheduler/confirm_action.html
@@ -22,7 +22,7 @@
{% for job in jobs %}
-
- {{ job.id }}
+ {{ job.name }}
{{ job | show_func_name }}
{% endfor %}
@@ -31,7 +31,7 @@
{% csrf_token %}
{% for job in jobs %}
-
+
{% endfor %}
diff --git a/scheduler/templates/admin/scheduler/job_detail.html b/scheduler/templates/admin/scheduler/job_detail.html
index 6892844..44badde 100644
--- a/scheduler/templates/admin/scheduler/job_detail.html
+++ b/scheduler/templates/admin/scheduler/job_detail.html
@@ -8,15 +8,15 @@
Home ›
Queues ›
{{ queue.name }} ›
-
{{ job.id }}
+
{{ job.name }}
{% endblock %}
{% block content_title %}
- Job {{ job.id }}
+
@@ -27,7 +27,7 @@ Job {{ job.id }}
{% if job.is_started %}
-
@@ -148,7 +135,7 @@
Job {{ job.id }}
{% endif %}
{% if job.is_failed %}
-
@@ -156,7 +143,7 @@
Job {{ job.id }}
{% endif %}
{% if not job.is_queued and not job.is_failed %}
-
diff --git a/scheduler/templates/admin/scheduler/jobs-list.partial.html b/scheduler/templates/admin/scheduler/jobs-list.partial.html
index 8186242..172944e 100644
--- a/scheduler/templates/admin/scheduler/jobs-list.partial.html
+++ b/scheduler/templates/admin/scheduler/jobs-list.partial.html
@@ -20,7 +20,7 @@
Job executions
{% for exec in executions %}
- {{ exec.id }}
+ {{ exec.id }}
|
{{ exec|job_status }}
diff --git a/scheduler/templates/admin/scheduler/jobs.html b/scheduler/templates/admin/scheduler/jobs.html
index e83d96a..72f8a79 100644
--- a/scheduler/templates/admin/scheduler/jobs.html
+++ b/scheduler/templates/admin/scheduler/jobs.html
@@ -92,11 +92,11 @@
|
+ value="{{ job.name }}"/>
|
-
- {{ job.id }}
+
+ {{ job.name }}
|
diff --git a/scheduler/templates/admin/scheduler/single_job_action.html b/scheduler/templates/admin/scheduler/single_job_action.html
index 53f9089..b6adad5 100644
--- a/scheduler/templates/admin/scheduler/single_job_action.html
+++ b/scheduler/templates/admin/scheduler/single_job_action.html
@@ -6,7 +6,7 @@
Home ›
Queues ›
{{ queue.name }} ›
- {{ job.id }} ›
+ {{ job.name }} ›
Delete
{% endblock %}
@@ -18,8 +18,8 @@
Are you sure you want to {{ action }}
-
- {{ job.id }} ({{ job|show_func_name }})
+
+ {{ job.name }} ({{ job|show_func_name }})
from
{{ queue.name }}?
diff --git a/scheduler/templates/admin/scheduler/stats.html b/scheduler/templates/admin/scheduler/stats.html
index 369e3a5..01b94fd 100644
--- a/scheduler/templates/admin/scheduler/stats.html
+++ b/scheduler/templates/admin/scheduler/stats.html
@@ -30,7 +30,6 @@
Queued Jobs |
Oldest Queued Job |
Active Jobs |
- Deferred Jobs |
Finished Jobs |
Failed Jobs |
Scheduled Jobs |
@@ -63,11 +62,6 @@
{{ queue.started_jobs }}
-
-
- {{ queue.deferred_jobs }}
-
- |
{{ queue.finished_jobs }}
diff --git a/scheduler/templates/admin/scheduler/worker_details.html b/scheduler/templates/admin/scheduler/worker_details.html
index 12ddfb6..4ed7060 100644
--- a/scheduler/templates/admin/scheduler/worker_details.html
+++ b/scheduler/templates/admin/scheduler/worker_details.html
@@ -55,7 +55,7 @@
{% if job %}
{{ job.func_name }}
- ( {{ job.id }})
+ ( {{ job.name }})
{% else %}
No current job
{% endif %}
diff --git a/scheduler/templates/admin/scheduler/workers-list.partial.html b/scheduler/templates/admin/scheduler/workers-list.partial.html
index aff14a2..9618300 100644
--- a/scheduler/templates/admin/scheduler/workers-list.partial.html
+++ b/scheduler/templates/admin/scheduler/workers-list.partial.html
@@ -7,6 +7,9 @@
Name
|
+
+ Queues
+ |
State
|
@@ -41,8 +44,14 @@
{{ worker.name }}
| |
- {{ worker.get_state }} |
- {{ worker.birth_date | date:"Y-m-d, H:i:s" }} |
+
+ {% for queue_name in worker.queue_names %}
+ {{ queue_name }}
+ {% if not forloop.last %},{% endif %}
+ {% endfor %}
+ |
+ {{ worker.state.value }} |
+ {{ worker.birth | date:"Y-m-d, H:i:s" }} |
{{ worker.hostname }} |
{{ worker.pid | unlocalize }} |
{{ worker.total_working_time | default:0 | floatformat }} secs |
diff --git a/scheduler/templatetags/scheduler_tags.py b/scheduler/templatetags/scheduler_tags.py
index faa0835..6cc5f50 100644
--- a/scheduler/templatetags/scheduler_tags.py
+++ b/scheduler/templatetags/scheduler_tags.py
@@ -3,18 +3,21 @@
from django import template
from django.utils.safestring import mark_safe
-from scheduler.rq_classes import JobExecution, DjangoQueue, DjangoWorker
-from scheduler.tools import get_scheduled_task
+from scheduler.helpers.queues import Queue
+from scheduler.helpers.tools import get_scheduled_task
+from scheduler.models.task import Task
+from scheduler.redis_models import Result, ResultType, JobModel, WorkerModel
+from scheduler.views import get_queue
register = template.Library()
@register.filter
-def show_func_name(rq_job: JobExecution) -> str:
+def show_func_name(job: JobModel) -> str:
try:
- res = rq_job.func_name
+ res = job.func_name
if res == "scheduler.tools.run_task":
- task = get_scheduled_task(*rq_job.args)
+ task = get_scheduled_task(*job.args)
res = task.function_string()
return mark_safe(res)
except Exception as e:
@@ -27,31 +30,40 @@ def get_item(dictionary: Dict, key):
@register.filter
-def scheduled_job(job: JobExecution):
- django_scheduled_job = get_scheduled_task(*job.args)
- return django_scheduled_job.get_absolute_url()
+def scheduled_task(job: JobModel) -> str:
+ django_scheduled_task = get_scheduled_task(*job.args)
+ return django_scheduled_task.get_absolute_url()
@register.filter
-def worker_scheduler_pid(worker: Optional[DjangoWorker]) -> str:
- scheduler_pid = worker.scheduler_pid() if worker is not None else None
- return str(scheduler_pid) if scheduler_pid is not None else "-"
+def worker_scheduler_pid(worker: WorkerModel) -> str:
+ return str(worker.scheduler_pid) if worker.scheduler_pid is not None else "No Scheduler"
@register.filter
-def job_result(job: JobExecution):
- result = job.latest_result()
+def job_result(job: JobModel) -> Optional[str]:
+ queue = get_queue(job.queue_name)
+ result = Result.fetch_latest(queue.connection, job.name)
return result.type.name.capitalize() if result else None
@register.filter
-def job_status(job: JobExecution):
- result = job.get_status()
+def job_exc_info(job: JobModel) -> Optional[str]:
+ queue = get_queue(job.queue_name)
+ result = Result.fetch_latest(queue.connection, job.name)
+ if result and result.type == ResultType.FAILED and result.exc_string:
+ return mark_safe(result.exc_string)
+ return None
+
+
+@register.filter
+def job_status(job: JobModel):
+ result = job.status
return result.capitalize()
@register.filter
-def job_runtime(job: JobExecution):
+def job_runtime(job: JobModel):
ended_at = job.ended_at
if ended_at:
runtime = job.ended_at - job.started_at
@@ -63,8 +75,8 @@ def job_runtime(job: JobExecution):
@register.filter
-def job_scheduled_time(job: JobExecution, queue: DjangoQueue):
+def job_scheduled_time(job: JobModel, queue: Queue):
try:
- return queue.scheduled_job_registry.get_scheduled_time(job.id)
+ return queue.scheduled_job_registry.get_scheduled_time(job.name)
except Exception:
return None
diff --git a/scheduler/tests/jobs.py b/scheduler/tests/jobs.py
index a6b0871..7894022 100644
--- a/scheduler/tests/jobs.py
+++ b/scheduler/tests/jobs.py
@@ -1,6 +1,6 @@
from time import sleep
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
_counter = 0
@@ -36,4 +36,4 @@ def test_job():
def enqueue_jobs():
queue = get_queue()
for i in range(20):
- queue.enqueue(test_job, job_id=f"job_{i}", args=())
+ queue.enqueue_call(test_job, name=f"job_{i:02}", args=())
diff --git a/scheduler/tests/test_internals.py b/scheduler/tests/test_internals.py
index f916a48..5a1aad4 100644
--- a/scheduler/tests/test_internals.py
+++ b/scheduler/tests/test_internals.py
@@ -4,7 +4,7 @@
from scheduler.models.task import TaskType
from scheduler.tests.testtools import SchedulerBaseCase, task_factory
-from scheduler.tools import get_scheduled_task
+from scheduler.helpers.tools import get_scheduled_task
class TestInternals(SchedulerBaseCase):
diff --git a/scheduler/tests/test_job_decorator.py b/scheduler/tests/test_job_decorator.py
index 85a64b9..741dcfb 100644
--- a/scheduler/tests/test_job_decorator.py
+++ b/scheduler/tests/test_job_decorator.py
@@ -3,12 +3,13 @@
from django.test import TestCase
from scheduler import job, settings
+from scheduler.helpers.queues import get_queue
from . import test_settings # noqa
from ..decorators import JOB_METHODS_LIST
-from ..queues import get_queue, QueueNotFoundError
+from ..redis_models.job import JobModel
-@job
+@job()
def test_job():
time.sleep(1)
return 1 + 1
@@ -36,36 +37,47 @@ def setUp(self) -> None:
get_queue("default").connection.flushall()
def test_all_job_methods_registered(self):
- self.assertEqual(1, len(JOB_METHODS_LIST))
+ self.assertEqual(4, len(JOB_METHODS_LIST))
def test_job_decorator_no_params(self):
test_job.delay()
- config = settings.SCHEDULER_CONFIG
- self._assert_job_with_func_and_props("default", test_job, config.DEFAULT_RESULT_TTL, config.DEFAULT_TIMEOUT)
+ self._assert_job_with_func_and_props(
+ "default",
+ test_job,
+ settings.SCHEDULER_CONFIG.DEFAULT_RESULT_TTL,
+ settings.SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT,
+ )
def test_job_decorator_timeout(self):
test_job_timeout.delay()
- config = settings.SCHEDULER_CONFIG
- self._assert_job_with_func_and_props("default", test_job_timeout, config.DEFAULT_RESULT_TTL, 1)
+ self._assert_job_with_func_and_props(
+ "default",
+ test_job_timeout,
+ settings.SCHEDULER_CONFIG.DEFAULT_RESULT_TTL,
+ 1,
+ )
def test_job_decorator_result_ttl(self):
test_job_result_ttl.delay()
- config = settings.SCHEDULER_CONFIG
- self._assert_job_with_func_and_props("default", test_job_result_ttl, 1, config.DEFAULT_TIMEOUT)
+ self._assert_job_with_func_and_props(
+ "default",
+ test_job_result_ttl,
+ 1,
+ settings.SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT,
+ )
def test_job_decorator_different_queue(self):
test_job_diff_queue.delay()
- config = settings.SCHEDULER_CONFIG
self._assert_job_with_func_and_props(
"django_tasks_scheduler_test",
test_job_diff_queue,
- config.DEFAULT_RESULT_TTL,
- config.DEFAULT_TIMEOUT,
+ settings.SCHEDULER_CONFIG.DEFAULT_RESULT_TTL,
+ settings.SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT,
)
def _assert_job_with_func_and_props(self, queue_name, expected_func, expected_result_ttl, expected_timeout):
queue = get_queue(queue_name)
- jobs = queue.get_jobs()
+ jobs = JobModel.get_many(queue.queued_job_registry.all(), queue.connection)
self.assertEqual(1, len(jobs))
j = jobs[0]
@@ -74,7 +86,7 @@ def _assert_job_with_func_and_props(self, queue_name, expected_func, expected_re
self.assertEqual(j.timeout, expected_timeout)
def test_job_decorator_bad_queue(self):
- with self.assertRaises(QueueNotFoundError):
+ with self.assertRaises(settings.QueueNotFoundError):
@job("bad-queue")
def test_job_bad_queue():
time.sleep(1)
diff --git a/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py b/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py
index 74a53df..b409ef1 100644
--- a/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py
+++ b/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py
@@ -1,17 +1,17 @@
from django.core.management import call_command
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
+from scheduler.tests import test_settings # noqa
from scheduler.tests.jobs import failing_job
-from scheduler.tests.test_views import BaseTestCase
-from scheduler.tools import create_worker
-from scheduler.tests import test_settings # noqa
+from scheduler.helpers.tools import create_worker
+from scheduler.tests.test_views.base import BaseTestCase
class DeleteFailedExecutionsTest(BaseTestCase):
def test_delete_failed_executions__delete_jobs(self):
queue = get_queue("default")
call_command("delete_failed_executions", queue="default")
- queue.enqueue(failing_job)
+ queue.enqueue_call(failing_job)
worker = create_worker("default")
worker.work(burst=True)
self.assertEqual(1, len(queue.failed_job_registry))
diff --git a/scheduler/tests/test_mgmt_commands/test_export.py b/scheduler/tests/test_mgmt_commands/test_export.py
index 6ad1878..6e274f6 100644
--- a/scheduler/tests/test_mgmt_commands/test_export.py
+++ b/scheduler/tests/test_mgmt_commands/test_export.py
@@ -9,7 +9,7 @@
from scheduler.tests import test_settings # noqa
from scheduler.tests.testtools import task_factory
-from scheduler.tools import TaskType
+from scheduler.helpers.tools import TaskType
class ExportTest(TestCase):
@@ -22,34 +22,34 @@ def tearDown(self) -> None:
os.remove(self.tmpfile.name)
def test_export__should_export_job(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=True))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=True))
# act
call_command("export", filename=self.tmpfile.name)
# assert
result = json.load(self.tmpfile)
- self.assertEqual(len(jobs), len(result))
- self.assertEqual(result[0], jobs[0].to_dict())
- self.assertEqual(result[1], jobs[1].to_dict())
+ self.assertEqual(len(tasks), len(result))
+ self.assertEqual(result[0], tasks[0].to_dict())
+ self.assertEqual(result[1], tasks[1].to_dict())
def test_export__should_export_enabled_jobs_only(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=False))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=False))
# act
call_command("export", filename=self.tmpfile.name, enabled=True)
# assert
result = json.load(self.tmpfile)
- self.assertEqual(len(jobs) - 1, len(result))
- self.assertEqual(result[0], jobs[0].to_dict())
+ self.assertEqual(len(tasks) - 1, len(result))
+ self.assertEqual(result[0], tasks[0].to_dict())
def test_export__should_export_job_yaml_without_yaml_lib(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=True))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=True))
# act
with mock.patch.dict("sys.modules", {"yaml": None}):
@@ -58,16 +58,16 @@ def test_export__should_export_job_yaml_without_yaml_lib(self):
self.assertEqual(cm.exception.code, 1)
def test_export__should_export_job_yaml_green(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=True))
- jobs.append(task_factory(TaskType.CRON, enabled=True))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=True))
+ tasks.append(task_factory(TaskType.CRON, enabled=True))
# act
call_command("export", filename=self.tmpfile.name, format="yaml")
# assert
result = yaml.load(self.tmpfile, yaml.SafeLoader)
- self.assertEqual(len(jobs), len(result))
- self.assertEqual(result[0], jobs[0].to_dict())
- self.assertEqual(result[1], jobs[1].to_dict())
- self.assertEqual(result[2], jobs[2].to_dict())
+ self.assertEqual(len(tasks), len(result))
+ self.assertEqual(result[0], tasks[0].to_dict())
+ self.assertEqual(result[1], tasks[1].to_dict())
+ self.assertEqual(result[2], tasks[2].to_dict())
diff --git a/scheduler/tests/test_mgmt_commands/test_import.py b/scheduler/tests/test_mgmt_commands/test_import.py
index 2d641fc..318c068 100644
--- a/scheduler/tests/test_mgmt_commands/test_import.py
+++ b/scheduler/tests/test_mgmt_commands/test_import.py
@@ -8,9 +8,9 @@
from django.test import TestCase
from scheduler.models.task import Task
+from scheduler.tests import test_settings # noqa
from scheduler.tests.testtools import task_factory
-from scheduler.tools import TaskType
-from scheduler.tests import test_settings # noqa
+from scheduler.helpers.tools import TaskType
class ImportTest(TestCase):
diff --git a/scheduler/tests/test_mgmt_commands/test_rq_stats.py b/scheduler/tests/test_mgmt_commands/test_rq_stats.py
index 0daf641..dc43a49 100644
--- a/scheduler/tests/test_mgmt_commands/test_rq_stats.py
+++ b/scheduler/tests/test_mgmt_commands/test_rq_stats.py
@@ -1,7 +1,7 @@
from django.core.management import call_command
from django.test import TestCase
-from scheduler.tests import test_settings # noqa
+from scheduler.tests import test_settings # noqa
class RqstatsTest(TestCase):
diff --git a/scheduler/tests/test_mgmt_commands/test_rq_worker.py b/scheduler/tests/test_mgmt_commands/test_rq_worker.py
index c4e4e49..979e59d 100644
--- a/scheduler/tests/test_mgmt_commands/test_rq_worker.py
+++ b/scheduler/tests/test_mgmt_commands/test_rq_worker.py
@@ -1,9 +1,10 @@
from django.core.management import call_command
from django.test import TestCase
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
+from scheduler.tests import test_settings # noqa
from scheduler.tests.jobs import failing_job
-from scheduler.tests import test_settings # noqa
class RqworkerTestCase(TestCase):
@@ -12,71 +13,34 @@ def test_rqworker__no_queues_params(self):
queue = get_queue("default")
# enqueue some jobs that will fail
- jobs = []
- job_ids = []
+ job_names = []
for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
+ job = queue.enqueue_call(failing_job)
+ job_names.append(job.name)
# Create a worker to execute these jobs
call_command("rqworker", fork_job_execution=False, burst=True)
# check if all jobs are really failed
- for job in jobs:
+ for job_name in job_names:
+ job = JobModel.get(name=job_name, connection=queue.connection)
self.assertTrue(job.is_failed)
- def test_rqworker__job_class_param__green(self):
- queue = get_queue("default")
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # Create a worker to execute these jobs
- call_command(
- "rqworker", "--job-class", "scheduler.rq_classes.JobExecution", fork_job_execution=False, burst=True
- )
-
- # check if all jobs are really failed
- for job in jobs:
- self.assertTrue(job.is_failed)
-
- def test_rqworker__bad_job_class__fail(self):
- queue = get_queue("default")
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # Create a worker to execute these jobs
- with self.assertRaises(ImportError):
- call_command("rqworker", "--job-class", "rq.badclass", fork_job_execution=False, burst=True)
-
def test_rqworker__run_jobs(self):
queue = get_queue("default")
# enqueue some jobs that will fail
- jobs = []
- job_ids = []
+ job_names = []
for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
+ job = queue.enqueue_call(failing_job)
+ job_names.append(job.name)
# Create a worker to execute these jobs
call_command("rqworker", "default", fork_job_execution=False, burst=True)
# check if all jobs are really failed
- for job in jobs:
+ for job_name in job_names:
+ job = JobModel.get(name=job_name, connection=queue.connection)
self.assertTrue(job.is_failed)
def test_rqworker__worker_with_two_queues(self):
@@ -84,32 +48,34 @@ def test_rqworker__worker_with_two_queues(self):
queue2 = get_queue("django_tasks_scheduler_test")
# enqueue some jobs that will fail
- jobs = []
- job_ids = []
+ job_names = []
for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
- job = queue2.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
+ job = queue.enqueue_call(failing_job)
+ job_names.append(job.name)
+ job = queue2.enqueue_call(failing_job)
+ job_names.append(job.name)
# Create a worker to execute these jobs
call_command("rqworker", "default", "django_tasks_scheduler_test", fork_job_execution=False, burst=True)
# check if all jobs are really failed
- for job in jobs:
+ for job_name in job_names:
+ job = JobModel.get(name=job_name, connection=queue.connection)
self.assertTrue(job.is_failed)
def test_rqworker__worker_with_one_queue__does_not_perform_other_queue_job(self):
queue = get_queue("default")
queue2 = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(failing_job)
- other_job = queue2.enqueue(failing_job)
+ job = queue.enqueue_call(failing_job)
+ other_job = queue2.enqueue_call(failing_job)
# Create a worker to execute these jobs
call_command("rqworker", "default", fork_job_execution=False, burst=True)
+
# assert
+ job = JobModel.get(job.name, connection=queue.connection)
self.assertTrue(job.is_failed)
- self.assertTrue(other_job.is_queued)
+ other_job = JobModel.get(other_job.name, connection=queue.connection)
+
+ self.assertTrue(other_job.is_queued, f"Expected other job to be queued but status={other_job.status}")
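
Note on the pattern: the rewritten worker tests no longer hold Job objects; they keep job.name and re-fetch state through JobModel. A minimal sketch of that flow, using only calls that appear in this diff (get_queue, enqueue_call, JobModel.get, call_command("rqworker", ...)) and the queue names from the test settings:

    from django.core.management import call_command
    from scheduler.helpers.queues import get_queue
    from scheduler.redis_models import JobModel
    from scheduler.tests.jobs import failing_job

    queue = get_queue("default")
    job = queue.enqueue_call(failing_job)                  # jobs are addressed by .name rather than .id
    call_command("rqworker", fork_job_execution=False, burst=True)
    job = JobModel.get(job.name, connection=queue.connection)  # re-fetch the job's current state
    assert job.is_failed
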
diff --git a/scheduler/tests/test_mgmt_commands/test_run_job.py b/scheduler/tests/test_mgmt_commands/test_run_job.py
index 4efe24d..a589589 100644
--- a/scheduler/tests/test_mgmt_commands/test_run_job.py
+++ b/scheduler/tests/test_mgmt_commands/test_run_job.py
@@ -1,9 +1,10 @@
from django.core.management import call_command
from django.test import TestCase
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
+from scheduler.tests import test_settings # noqa
from scheduler.tests.jobs import test_job
-from scheduler.tests import test_settings # noqa
class RunJobTest(TestCase):
@@ -14,6 +15,6 @@ def test_run_job__should_schedule_job(self):
# act
call_command("run_job", func_name, queue="default")
# assert
- job_list = queue.get_jobs()
+ job_list = JobModel.get_many(queue.queued_job_registry.all(), queue.connection)
self.assertEqual(1, len(job_list))
self.assertEqual(func_name + "()", job_list[0].get_call_string())
diff --git a/scheduler/tests/test_task_types/test_cron_task.py b/scheduler/tests/test_task_types/test_cron_task.py
index a7d2a7a..6abe3b6 100644
--- a/scheduler/tests/test_task_types/test_cron_task.py
+++ b/scheduler/tests/test_task_types/test_cron_task.py
@@ -1,26 +1,31 @@
from django.core.exceptions import ValidationError
from scheduler import settings
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
from scheduler.tests.test_task_types.test_task_model import BaseTestCases
from scheduler.tests.testtools import task_factory
-from scheduler.tools import create_worker, TaskType
+from scheduler.helpers.tools import create_worker, TaskType
class TestCronTask(BaseTestCases.TestBaseTask):
task_type = TaskType.CRON
+ def setUp(self) -> None:
+ super().setUp()
+ self.queue_name = settings.get_queue_names()[0]
+
def test_clean(self):
task = task_factory(self.task_type)
task.cron_string = "* * * * *"
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_job"
self.assertIsNone(task.clean())
def test_clean_cron_string_invalid(self):
task = task_factory(self.task_type)
task.cron_string = "not-a-cron-string"
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_job"
with self.assertRaises(ValidationError):
task.clean_cron_string()
@@ -29,7 +34,8 @@ def test_check_rescheduled_after_execution(self):
task = task_factory(self.task_type)
queue = task.rqueue
first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ entry = JobModel.get(first_run_id, connection=queue.connection)
+ self.assertIsNotNone(entry)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 0)
@@ -40,13 +46,10 @@ def test_check_rescheduled_after_execution(self):
self.assertNotEqual(task.job_id, first_run_id)
def test_check_rescheduled_after_failed_execution(self):
- task = task_factory(
- self.task_type,
- callable_name="scheduler.tests.jobs.scheduler.tests.jobs.test_job",
- )
+ task = task_factory(self.task_type, callable_name="scheduler.tests.jobs.failing_job")
queue = task.rqueue
first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 1)
@@ -58,21 +61,19 @@ def test_check_rescheduled_after_failed_execution(self):
def test_cron_task_enqueuing_jobs(self):
queue = get_queue()
- prev_queued = len(queue.scheduled_job_registry)
- prev_finished = len(queue.finished_job_registry)
+ prev_queued = queue.scheduled_job_registry.count(connection=queue.connection)
+ prev_finished = queue.finished_job_registry.count(connection=queue.connection)
+
task = task_factory(self.task_type, callable_name="scheduler.tests.jobs.enqueue_jobs")
- self.assertEqual(prev_queued + 1, len(queue.scheduled_job_registry))
+ self.assertEqual(prev_queued + 1, queue.scheduled_job_registry.count(connection=queue.connection))
first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
- self.assertEqual(20, len(queue))
- self.assertEqual(prev_finished + 1, len(queue.finished_job_registry))
- worker = create_worker(
- "default",
- fork_job_execution=False,
- )
+ self.assertEqual(20, len(queue.queued_job_registry))
+ self.assertEqual(prev_finished + 1, queue.finished_job_registry.count(connection=queue.connection))
+ worker = create_worker("default", fork_job_execution=False)
worker.work(burst=True)
- self.assertEqual(prev_finished + 21, len(queue.finished_job_registry))
+ self.assertEqual(prev_finished + 21, queue.finished_job_registry.count(connection=queue.connection))
worker.refresh()
- self.assertEqual(20, worker.successful_job_count)
- self.assertEqual(0, worker.failed_job_count)
+ self.assertEqual(20, worker._model.successful_job_count)
+ self.assertEqual(0, worker._model.failed_job_count)
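
For reference, the registry-count / synchronous-run sequence these cron-task tests follow, as a minimal sketch (it assumes the scheduler test settings are loaded and uses only calls appearing in this diff: task_factory, task.rqueue, registry.count, JobModel.get, queue.run_sync):

    from scheduler.helpers.tools import TaskType
    from scheduler.redis_models import JobModel
    from scheduler.tests.testtools import task_factory

    task = task_factory(TaskType.CRON, callable_name="scheduler.tests.jobs.test_job")  # create + schedule a task
    queue = task.rqueue                                                                # queue the task was scheduled on
    before = queue.finished_job_registry.count(connection=queue.connection)

    entry = JobModel.get(task.job_id, connection=queue.connection)  # the scheduled execution
    queue.run_sync(entry)                                           # run it inline

    assert queue.finished_job_registry.count(connection=queue.connection) == before + 1
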
diff --git a/scheduler/tests/test_task_types/test_once_task.py b/scheduler/tests/test_task_types/test_once_task.py
index f9b686c..b57fd77 100644
--- a/scheduler/tests/test_task_types/test_once_task.py
+++ b/scheduler/tests/test_task_types/test_once_task.py
@@ -10,10 +10,11 @@
class TestScheduledTask(BaseTestCases.TestSchedulableTask):
task_type = TaskType.ONCE
+ queue_name = settings.get_queue_names()[0]
def test_clean(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
self.assertIsNone(job.clean())
diff --git a/scheduler/tests/test_task_types/test_repeatable_task.py b/scheduler/tests/test_task_types/test_repeatable_task.py
index fce0d32..b1b5872 100644
--- a/scheduler/tests/test_task_types/test_repeatable_task.py
+++ b/scheduler/tests/test_task_types/test_repeatable_task.py
@@ -5,13 +5,15 @@
from django.utils import timezone
from scheduler import settings
+from scheduler.redis_models import JobModel
from scheduler.tests.test_task_types.test_task_model import BaseTestCases
from scheduler.tests.testtools import task_factory, _get_task_job_execution_from_registry
-from scheduler.tools import TaskType
+from scheduler.helpers.tools import TaskType
class TestRepeatableTask(BaseTestCases.TestSchedulableTask):
task_type = TaskType.REPEATABLE
+ queue_name = settings.get_queue_names()[0]
def test_unschedulable_old_job(self):
job = task_factory(self.task_type, scheduled_time=timezone.now() - timedelta(hours=1), repeat=0)
@@ -24,7 +26,7 @@ def test_schedulable_old_job_repeat_none(self):
def test_clean(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 1
job.result_ttl = -1
@@ -32,21 +34,17 @@ def test_clean(self):
def test_clean_seconds(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 60
job.result_ttl = -1
job.interval_unit = "seconds"
self.assertIsNone(job.clean())
- @override_settings(
- SCHEDULER_CONFIG={
- "SCHEDULER_INTERVAL": 10,
- }
- )
+ @override_settings(SCHEDULER_CONFIG={"SCHEDULER_INTERVAL": 10})
def test_clean_too_frequent(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 2 # Smaller than 10
job.result_ttl = -1
@@ -56,7 +54,7 @@ def test_clean_too_frequent(self):
def test_clean_not_multiple(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 121
job.interval_unit = "seconds"
@@ -64,41 +62,41 @@ def test_clean_not_multiple(self):
job.clean_interval_unit()
def test_clean_short_result_ttl(self):
- job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
- job.callable = "scheduler.tests.jobs.test_job"
- job.interval = 1
- job.repeat = 1
- job.result_ttl = 3599
- job.interval_unit = "hours"
- job.repeat = 42
+ task = task_factory(self.task_type)
+ task.queue = self.queue_name
+ task.callable = "scheduler.tests.jobs.test_job"
+ task.interval = 1
+ task.repeat = 1
+ task.result_ttl = 3599
+ task.interval_unit = "hours"
+ task.repeat = 42
with self.assertRaises(ValidationError):
- job.clean_result_ttl()
+ task.clean_result_ttl()
def test_clean_indefinite_result_ttl(self):
- job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
- job.callable = "scheduler.tests.jobs.test_job"
- job.interval = 1
- job.result_ttl = -1
- job.interval_unit = "hours"
- job.clean_result_ttl()
+ task = task_factory(self.task_type)
+ task.queue = self.queue_name
+ task.callable = "scheduler.tests.jobs.test_job"
+ task.interval = 1
+ task.result_ttl = -1
+ task.interval_unit = "hours"
+ task.clean_result_ttl()
def test_clean_undefined_result_ttl(self):
- job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
- job.callable = "scheduler.tests.jobs.test_job"
- job.interval = 1
- job.interval_unit = "hours"
- job.clean_result_ttl()
+ task = task_factory(self.task_type)
+ task.queue = self.queue_name
+ task.callable = "scheduler.tests.jobs.test_job"
+ task.interval = 1
+ task.interval_unit = "hours"
+ task.clean_result_ttl()
def test_interval_seconds_weeks(self):
- job = task_factory(self.task_type, interval=2, interval_unit="weeks")
- self.assertEqual(1209600.0, job.interval_seconds())
+ task = task_factory(self.task_type, interval=2, interval_unit="weeks")
+ self.assertEqual(1209600.0, task.interval_seconds())
def test_interval_seconds_days(self):
- job = task_factory(self.task_type, interval=2, interval_unit="days")
- self.assertEqual(172800.0, job.interval_seconds())
+ task = task_factory(self.task_type, interval=2, interval_unit="days")
+ self.assertEqual(172800.0, task.interval_seconds())
def test_interval_seconds_hours(self):
job = task_factory(self.task_type, interval=2, interval_unit="hours")
@@ -113,9 +111,7 @@ def test_interval_seconds_seconds(self):
self.assertEqual(15.0, job.interval_seconds())
def test_result_interval(self):
- job = task_factory(
- self.task_type,
- )
+ job = task_factory(self.task_type)
entry = _get_task_job_execution_from_registry(job)
self.assertEqual(entry.meta["interval"], 3600)
@@ -155,7 +151,7 @@ def test_check_rescheduled_after_execution(self):
task = task_factory(self.task_type, scheduled_time=timezone.now() + timedelta(seconds=1), repeat=10)
queue = task.rqueue
first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 0)
@@ -174,7 +170,7 @@ def test_check_rescheduled_after_execution_failed_job(self):
)
queue = task.rqueue
first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 1)
@@ -192,7 +188,7 @@ def test_check_not_rescheduled_after_last_repeat(self):
)
queue = task.rqueue
first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 0)
diff --git a/scheduler/tests/test_task_types/test_task_model.py b/scheduler/tests/test_task_types/test_task_model.py
index 70640a8..c16ea70 100644
--- a/scheduler/tests/test_task_types/test_task_model.py
+++ b/scheduler/tests/test_task_types/test_task_model.py
@@ -10,12 +10,17 @@
from scheduler import settings
from scheduler.models.task import TaskType, Task, TaskArg, TaskKwarg
-from scheduler.queues import get_queue
-from scheduler.tests import jobs
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.queues import perform_job
+from scheduler.tests import jobs, test_settings # noqa
from scheduler.tests.testtools import (
- task_factory, taskarg_factory, _get_task_job_execution_from_registry,
- SchedulerBaseCase, _get_executions, )
-from scheduler.tools import run_task, create_worker
+ task_factory,
+ taskarg_factory,
+ _get_task_job_execution_from_registry,
+ SchedulerBaseCase,
+ _get_executions,
+)
+from scheduler.helpers.tools import run_task, create_worker
def assert_response_has_msg(response, message):
@@ -25,7 +30,7 @@ def assert_response_has_msg(response, message):
def assert_has_execution_with_status(task, status):
job_list = _get_executions(task)
- job_list = [(j.id, j.get_status()) for j in job_list]
+ job_list = [(j.name, j.get_status(connection=task.rqueue.connection)) for j in job_list]
for job in job_list:
if job[1] == status:
return
@@ -35,6 +40,7 @@ def assert_has_execution_with_status(task, status):
class BaseTestCases:
class TestBaseTask(SchedulerBaseCase):
task_type = None
+ queue_name = settings.get_queue_names()[0]
def test_callable_func(self):
task = task_factory(self.task_type)
@@ -60,7 +66,7 @@ def test_clean_callable_invalid(self):
task.clean_callable()
def test_clean_queue(self):
- for queue in settings.QUEUES.keys():
+ for queue in settings.get_queue_names():
task = task_factory(self.task_type)
task.queue = queue
self.assertIsNone(task.clean_queue())
@@ -75,13 +81,13 @@ def test_clean_queue_invalid(self):
# next 2 check the above are included in job.clean() function
def test_clean_base(self):
task = task_factory(self.task_type)
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_job"
self.assertIsNone(task.clean())
def test_clean_invalid_callable(self):
task = task_factory(self.task_type)
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_non_callable"
with self.assertRaises(ValidationError):
task.clean()
@@ -141,7 +147,7 @@ def test_save_and_schedule(self):
def test_schedule2(self):
task = task_factory(self.task_type)
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.enabled = False
task.scheduled_time = timezone.now() + timedelta(minutes=1)
self.assertFalse(task._schedule())
@@ -179,18 +185,20 @@ def test_timeout_passthrough(self):
def test_at_front_passthrough(self):
task = task_factory(self.task_type, at_front=True)
queue = task.rqueue
- jobs_to_schedule = queue.scheduled_job_registry.get_job_ids()
+ jobs_to_schedule = queue.scheduled_job_registry.all()
self.assertIn(task.job_id, jobs_to_schedule)
def test_callable_result(self):
task = task_factory(self.task_type)
entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(entry.perform(), 2)
+ queue = get_queue("default")
+ self.assertEqual(perform_job(entry, connection=queue.connection), 2)
def test_callable_empty_args_and_kwargs(self):
task = task_factory(self.task_type, callable="scheduler.tests.jobs.test_args_kwargs")
entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(entry.perform(), "test_args_kwargs()")
+ queue = get_queue("default")
+ self.assertEqual(perform_job(entry, connection=queue.connection), "test_args_kwargs()")
def test_delete_args(self):
task = task_factory(self.task_type)
@@ -235,7 +243,9 @@ def test_callable_args_and_kwargs(self):
taskarg_factory(TaskKwarg, key="key3", arg_type="bool", val=False, content_object=task)
task.save()
entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(entry.perform(), "test_args_kwargs('one', key1=2, key2={}, key3=False)".format(date))
+ queue = get_queue("default")
+ self.assertEqual(perform_job(entry, connection=queue.connection),
+ "test_args_kwargs('one', key1=2, key2={}, key3=False)".format(date))
def test_function_string(self):
task = task_factory(self.task_type)
@@ -306,7 +316,7 @@ def test_admin_run_job_now_enqueues_job_at(self):
self.assertEqual(302, res.status_code)
task.refresh_from_db()
queue = get_queue(task.queue)
- self.assertIn(task.job_id, queue.get_job_ids())
+ self.assertIn(task.job_id, queue.queued_job_registry.all())
def test_admin_change_view(self):
# arrange
@@ -355,30 +365,26 @@ def test_admin_enqueue_job_now(self):
task.id,
],
}
- model = task._meta.model.__name__.lower()
- url = reverse(f"admin:scheduler_{model}_changelist")
+ url = reverse(f"admin:scheduler_task_changelist")
# act
res = self.client.post(url, data=data, follow=True)
# assert part 1
self.assertEqual(200, res.status_code)
entry = _get_task_job_execution_from_registry(task)
- task_model, scheduled_task_id = entry.args
- self.assertEqual(task_model, task.task_type)
+ task_type, scheduled_task_id = entry.args
+ self.assertEqual(task_type, task.task_type)
self.assertEqual(scheduled_task_id, task.id)
- self.assertEqual("scheduled", entry.get_status())
+ self.assertEqual("scheduled", entry.get_status(connection=task.rqueue.connection))
assert_has_execution_with_status(task, "queued")
# act 2
- worker = create_worker(
- "default",
- fork_job_execution=False,
- )
+ worker = create_worker("default", fork_job_execution=False)
worker.work(burst=True)
# assert 2
entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(task_model, task.task_type)
+ self.assertEqual(task_type, task.task_type)
self.assertEqual(scheduled_task_id, task.id)
assert_has_execution_with_status(task, "finished")
@@ -432,28 +438,18 @@ def test_admin_single_delete(self):
# arrange
self.client.login(username="admin", password="admin")
prev_count = Task.objects.filter(task_type=self.task_type).count()
- task = task_factory(
- self.task_type,
- )
+ task = task_factory(self.task_type)
self.assertIsNotNone(task.job_id)
self.assertTrue(task.is_scheduled())
- prev = len(_get_executions(task))
- model = task._meta.model.__name__.lower()
- url = reverse(
- f"admin:scheduler_{model}_delete",
- args=[
- task.pk,
- ],
- )
- data = {
- "post": "yes",
- }
+ prev_executions_count = len(_get_executions(task))
+ url = reverse(f"admin:scheduler_task_delete", args=[task.pk])
+ data = dict(post="yes")
# act
res = self.client.post(url, data=data, follow=True)
# assert
self.assertEqual(200, res.status_code)
self.assertEqual(prev_count, Task.objects.filter(task_type=self.task_type).count())
- self.assertEqual(prev - 1, len(_get_executions(task)))
+ self.assertEqual(prev_executions_count - 1, len(_get_executions(task)))
def test_admin_delete_selected(self):
# arrange
@@ -461,7 +457,7 @@ def test_admin_delete_selected(self):
task = task_factory(self.task_type, enabled=True)
task.save()
queue = get_queue(task.queue)
- scheduled_jobs = queue.scheduled_job_registry.get_job_ids()
+ scheduled_jobs = queue.scheduled_job_registry.all()
job_id = task.job_id
self.assertIn(job_id, scheduled_jobs)
data = {
@@ -479,7 +475,7 @@ def test_admin_delete_selected(self):
self.assertEqual(200, res.status_code)
assert_response_has_msg(res, "Successfully deleted 1 task.")
self.assertIsNone(Task.objects.filter(task_type=self.task_type).filter(id=task.id).first())
- scheduled_jobs = queue.scheduled_job_registry.get_job_ids()
+ scheduled_jobs = queue.scheduled_job_registry.all()
self.assertNotIn(job_id, scheduled_jobs)
class TestSchedulableTask(TestBaseTask):
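
The model tests above replace JobExecution.perform() with the perform_job helper. A sketch of the call, assuming (as these tests do implicitly) that perform_job accepts any JobModel, whether freshly enqueued or fetched from a registry:

    from scheduler.helpers.queues import get_queue, perform_job
    from scheduler.tests.jobs import test_job

    queue = get_queue("default")
    job = queue.enqueue_call(test_job)                      # returns the JobModel for the new job
    result = perform_job(job, connection=queue.connection)  # execute the callable in-process, return its result
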
diff --git a/scheduler/tests/test_views.py b/scheduler/tests/test_views.py
deleted file mode 100644
index 5d6f227..0000000
--- a/scheduler/tests/test_views.py
+++ /dev/null
@@ -1,539 +0,0 @@
-import uuid
-from datetime import datetime
-from unittest.mock import patch, PropertyMock
-
-from django.contrib.auth.models import User
-from django.test import TestCase
-from django.test.client import Client
-from django.urls import reverse
-
-from scheduler.queues import get_queue
-from scheduler.rq_classes import JobExecution, ExecutionStatus
-from scheduler.tests import test_settings # noqa
-from scheduler.tests.jobs import failing_job, long_job, test_job
-from scheduler.tests.testtools import assert_message_in_response, task_factory, _get_task_job_execution_from_registry
-from scheduler.tools import create_worker, TaskType
-
-
-class BaseTestCase(TestCase):
- def setUp(self):
- self.user = User.objects.create_superuser("user", password="pass")
- self.client = Client()
- self.client.login(username=self.user.username, password="pass")
- get_queue("django_tasks_scheduler_test").connection.flushall()
-
-
-class SingleJobActionViewsTest(BaseTestCase):
-
- def test_single_job_action_unknown_job(self):
- res = self.client.get(reverse("queue_job_action", args=["unknown", "cancel"]), follow=True)
- self.assertEqual(400, res.status_code)
-
- def test_single_job_action_unknown_action(self):
- queue = get_queue("default")
- job = queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
- job.refresh()
- self.assertTrue(job.is_failed)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "unknown"]), follow=True)
- self.assertEqual(404, res.status_code)
-
- def test_single_job_action_requeue_job(self):
- queue = get_queue("default")
- job = queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
- job.refresh()
- self.assertTrue(job.is_failed)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "requeue"]), follow=True)
- self.assertEqual(200, res.status_code)
- self.client.post(reverse("queue_job_action", args=[job.id, "requeue"]), {"requeue": "Requeue"}, follow=True)
- self.assertIn(job, queue.jobs)
- job.delete()
-
- def test_single_job_action_delete_job(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(test_job)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "delete"]), follow=True)
- self.assertEqual(200, res.status_code)
- self.client.post(reverse("queue_job_action", args=[job.id, "delete"]), {"post": "yes"}, follow=True)
- self.assertFalse(JobExecution.exists(job.id, connection=queue.connection))
- self.assertNotIn(job.id, queue.get_job_ids())
-
- def test_single_job_action_cancel_job(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(long_job)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "cancel"]), follow=True)
- self.assertEqual(200, res.status_code)
- res = self.client.post(reverse("queue_job_action", args=[job.id, "cancel"]), {"post": "yes"}, follow=True)
- self.assertEqual(200, res.status_code)
- tmp = JobExecution.fetch(job.id, connection=queue.connection)
- self.assertTrue(tmp.is_canceled)
- self.assertNotIn(job.id, queue.get_job_ids())
-
- def test_single_job_action_cancel_job_that_is_already_cancelled(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(long_job)
- res = self.client.post(reverse("queue_job_action", args=[job.id, "cancel"]), {"post": "yes"}, follow=True)
- self.assertEqual(200, res.status_code)
- tmp = JobExecution.fetch(job.id, connection=queue.connection)
- self.assertTrue(tmp.is_canceled)
- self.assertNotIn(job.id, queue.get_job_ids())
- res = self.client.post(reverse("queue_job_action", args=[job.id, "cancel"]), {"post": "yes"}, follow=True)
- self.assertEqual(200, res.status_code)
- assert_message_in_response(res, f"Could not perform action: Cannot cancel already canceled job: {job.id}")
-
- def test_single_job_action_enqueue_job(self):
- queue = get_queue("django_tasks_scheduler_test")
- job_list = []
- # enqueue some jobs that depends on other
- previous_job = None
- for _ in range(0, 3):
- job = queue.enqueue(test_job, depends_on=previous_job)
- job_list.append(job)
- previous_job = job
-
- # This job is deferred
-
- self.assertEqual(job_list[-1].get_status(), ExecutionStatus.DEFERRED)
- self.assertIsNone(job_list[-1].enqueued_at)
-
- # Try to force enqueue last job should do nothing
- res = self.client.get(reverse("queue_job_action", args=[job_list[-1].id, "enqueue"]), follow=True)
- self.assertEqual(200, res.status_code)
- res = self.client.post(reverse("queue_job_action", args=[job_list[-1].id, "enqueue"]), follow=True)
-
- # Check that job is still deferred because it has dependencies (rq 1.14 change)
- self.assertEqual(200, res.status_code)
- tmp = queue.fetch_job(job_list[-1].id)
- self.assertEqual(tmp.get_status(), ExecutionStatus.QUEUED)
- self.assertIsNotNone(tmp.enqueued_at)
-
-
-class JobListActionViewsTest(BaseTestCase):
- def test_job_list_action_delete_jobs__with_bad_next_url(self):
- queue = get_queue("django_tasks_scheduler_test")
-
- # enqueue some jobs
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(test_job)
- job_ids.append(job.id)
-
- # remove those jobs using view
- res = self.client.post(
- reverse(
- "queue_actions",
- args=[
- queue.name,
- ],
- ),
- {
- "action": "delete",
- "job_ids": job_ids,
- "next_url": "bad_url",
- },
- follow=True,
- )
- assert_message_in_response(res, "Bad followup URL")
- # check if jobs are removed
- self.assertEqual(200, res.status_code)
- for job_id in job_ids:
- self.assertFalse(JobExecution.exists(job_id, connection=queue.connection))
- self.assertNotIn(job_id, queue.job_ids)
-
- def test_job_list_action_delete_jobs(self):
- queue = get_queue("django_tasks_scheduler_test")
-
- # enqueue some jobs
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(test_job)
- job_ids.append(job.id)
-
- # remove those jobs using view
- res = self.client.post(
- reverse(
- "queue_actions",
- args=[
- queue.name,
- ],
- ),
- {"action": "delete", "job_ids": job_ids},
- follow=True,
- )
-
- # check if jobs are removed
- self.assertEqual(200, res.status_code)
- for job_id in job_ids:
- self.assertFalse(JobExecution.exists(job_id, connection=queue.connection))
- self.assertNotIn(job_id, queue.job_ids)
-
- def test_job_list_action_requeue_jobs(self):
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # do those jobs = fail them
- worker = create_worker("django_tasks_scheduler_test")
- worker.work(burst=True)
-
- # check if all jobs are really failed
- for job in jobs:
- self.assertTrue(job.is_failed)
-
- # re-nqueue failed jobs from failed queue
- self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "requeue", "job_ids": job_ids})
-
- # check if we requeue all failed jobs
- for job in jobs:
- self.assertFalse(job.is_failed)
-
- def test_job_list_action_stop_jobs(self):
- queue_name = "django_tasks_scheduler_test"
- queue = get_queue(queue_name)
-
- # Enqueue some jobs
- job_ids = []
- worker = create_worker("django_tasks_scheduler_test")
- for _ in range(3):
- job = queue.enqueue(test_job)
- job_ids.append(job.id)
- worker.prepare_job_execution(job)
-
- # Check if the jobs are started
- for job_id in job_ids:
- job = JobExecution.fetch(job_id, connection=queue.connection)
- self.assertEqual(job.get_status(), ExecutionStatus.STARTED)
-
- # Stop those jobs using the view
- started_job_registry = queue.started_job_registry
- self.assertEqual(len(started_job_registry), len(job_ids))
- self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "stop", "job_ids": job_ids})
- self.assertEqual(len(started_job_registry), 0)
-
- canceled_job_registry = queue.canceled_job_registry
- self.assertEqual(len(canceled_job_registry), len(job_ids))
-
- for job_id in job_ids:
- self.assertIn(job_id, canceled_job_registry)
-
-
-class QueueRegistryJobsViewTest(BaseTestCase):
- def test_queue_jobs_unknown_registry(self):
- queue_name = "default"
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "unknown"]), follow=True)
- self.assertEqual(404, res.status_code)
-
- def test_queue_jobs_unknown_queue(self):
- res = self.client.get(reverse("queue_registry_jobs", args=["UNKNOWN", "queued"]))
- self.assertEqual(404, res.status_code)
-
- def test_queued_jobs(self):
- """Jobs in queue are displayed properly"""
- queue = get_queue("default")
- job = queue.enqueue(test_job)
- queue_name = "default"
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "queued"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_finished_jobs(self):
- """Ensure that finished jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- job = queue.enqueue(test_job)
- registry = queue.finished_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "finished"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_failed_jobs(self):
- """Ensure that failed jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- # Test that page doesn't fail when FailedJobRegistry is empty
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
- self.assertEqual(res.status_code, 200)
-
- job = queue.enqueue(test_job)
- registry = queue.failed_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_scheduled_jobs(self):
- """Ensure that scheduled jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- # Test that page doesn't fail when ScheduledJobRegistry is empty
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
- self.assertEqual(res.status_code, 200)
-
- job = queue.enqueue_at(datetime.now(), test_job)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_scheduled_jobs_registry_removal(self):
- """Ensure that non-existing job is being deleted from registry by view"""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- registry = queue.scheduled_job_registry
- job = queue.enqueue_at(datetime.now(), test_job)
- self.assertEqual(len(registry), 1)
-
- queue.connection.delete(job.key)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
- self.assertEqual(res.context["jobs"], [])
-
- self.assertEqual(len(registry), 0)
-
- def test_started_jobs(self):
- """Ensure that active jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- job = queue.enqueue(test_job)
- registry = queue.started_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "started"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_deferred_jobs(self):
- """Ensure that active jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- job = queue.enqueue(test_job)
- registry = queue.deferred_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "deferred"]))
- self.assertEqual(res.context["jobs"], [job])
-
-
-class ViewTest(BaseTestCase):
-
- def test_job_details(self):
- """Job data is displayed properly"""
- queue = get_queue("default")
- job = queue.enqueue(test_job)
-
- url = reverse(
- "job_details",
- args=[
- job.id,
- ],
- )
- res = self.client.get(url)
- self.assertIn("job", res.context)
- self.assertEqual(res.context["job"], job)
-
- # This page shouldn't fail when job.data is corrupt
- queue.connection.hset(job.key, "data", "non-pickleable data")
- res = self.client.get(url)
- self.assertEqual(res.status_code, 200)
- self.assertIn("DeserializationError", res.content.decode())
-
- # Bad job-id should return 404
- url = reverse(
- "job_details",
- args=[
- "bad_job_id",
- ],
- )
- res = self.client.get(url)
- self.assertEqual(400, res.status_code)
-
- def test_scheduled_job_details(self):
- """Job data is displayed properly"""
- scheduled_job = task_factory(TaskType.ONCE, enabled=True)
- job = _get_task_job_execution_from_registry(scheduled_job)
-
- url = reverse(
- "job_details",
- args=[
- job.id,
- ],
- )
- res = self.client.get(url, follow=True)
- self.assertIn("job", res.context)
- self.assertEqual(res.context["job"], job)
-
- def test_job_details_on_deleted_dependency(self):
- """Page doesn't crash even if job.dependency has been deleted"""
- queue = get_queue("default")
-
- job = queue.enqueue(test_job)
- second_job = queue.enqueue(test_job, depends_on=job)
- job.delete()
- url = reverse("job_details", args=[second_job.id])
- res = self.client.get(url)
- self.assertEqual(res.status_code, 200)
- self.assertIn(second_job._dependency_id, res.content.decode())
-
- def test_requeue_all(self):
- """
- Ensure that re-queuing all failed job work properly
- """
- queue = get_queue("default")
- queue_name = "default"
- queue.enqueue(failing_job)
- queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
-
- res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(res.context["total_jobs"], 2)
- # After requeue_all is called, jobs are enqueued
- res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(len(queue), 2)
-
- def test_requeue_all_if_deleted_job(self):
- """
- Ensure that re-queuing all failed job work properly
- """
- queue = get_queue("default")
- queue_name = "default"
- job = queue.enqueue(failing_job)
- queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
-
- res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(res.context["total_jobs"], 2)
- job.delete()
-
- # After requeue_all is called, jobs are enqueued
- res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(len(queue), 1)
-
- def test_clear_queue_unknown_registry(self):
- queue_name = "django_tasks_scheduler_test"
- res = self.client.post(reverse("queue_clear", args=[queue_name, "unknown"]), {"post": "yes"})
- self.assertEqual(404, res.status_code)
-
- def test_clear_queue_enqueued(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(test_job)
- self.client.post(reverse("queue_clear", args=[queue.name, "queued"]), {"post": "yes"})
- self.assertFalse(JobExecution.exists(job.id, connection=queue.connection))
- self.assertNotIn(job.id, queue.job_ids)
-
- def test_clear_queue_scheduled(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue_at(datetime.now(), test_job)
-
- res = self.client.get(reverse("queue_clear", args=[queue.name, "scheduled"]), follow=True)
- self.assertEqual(200, res.status_code)
- self.assertEqual(
- res.context["jobs"],
- [
- job,
- ],
- )
-
- res = self.client.post(reverse("queue_clear", args=[queue.name, "scheduled"]), {"post": "yes"}, follow=True)
- assert_message_in_response(res, f"You have successfully cleared the scheduled jobs in queue {queue.name}")
- self.assertEqual(200, res.status_code)
- self.assertFalse(JobExecution.exists(job.id, connection=queue.connection))
- self.assertNotIn(job.id, queue.job_ids)
-
- def test_workers_home(self):
- res = self.client.get(reverse("workers_home"))
- prev_workers = res.context["workers"]
- worker1 = create_worker("django_tasks_scheduler_test")
- worker1.register_birth()
- worker2 = create_worker("test3")
- worker2.register_birth()
-
- res = self.client.get(reverse("workers_home"))
- self.assertEqual(res.context["workers"], prev_workers + [worker1, worker2])
-
- def test_queue_workers(self):
- """Worker index page should show workers for a specific queue"""
- queue_name = "django_tasks_scheduler_test"
-
- worker1 = create_worker("django_tasks_scheduler_test")
- worker1.register_birth()
- worker2 = create_worker("test3")
- worker2.register_birth()
-
- res = self.client.get(reverse("queue_workers", args=[queue_name]))
- self.assertEqual(res.context["workers"], [worker1])
-
- def test_worker_details(self):
- """Worker index page should show workers for a specific queue"""
-
- worker = create_worker("django_tasks_scheduler_test", name=uuid.uuid4().hex)
- worker.register_birth()
-
- url = reverse(
- "worker_details",
- args=[
- worker.name,
- ],
- )
- res = self.client.get(url)
- self.assertEqual(res.context["worker"], worker)
-
- def test_worker_details__non_existing_worker(self):
- """Worker index page should show workers for a specific queue"""
-
- worker = create_worker("django_tasks_scheduler_test", name="WORKER")
- worker.register_birth()
-
- res = self.client.get(reverse("worker_details", args=["bad-worker-name"]))
- self.assertEqual(404, res.status_code)
-
- def test_statistics_json_view(self):
- # Override testing SCHEDULER_QUEUES
- queues = {
- "default": {
- "DB": 0,
- "HOST": "localhost",
- "PORT": 6379,
- }
- }
- with patch("scheduler.settings.QUEUES", new_callable=PropertyMock(return_value=queues)):
- res = self.client.get(reverse("queues_home"))
- self.assertEqual(res.status_code, 200)
-
- res = self.client.get(reverse("queues_home_json"))
- self.assertEqual(res.status_code, 200)
-
- # Not staff => return 404
- self.user.is_staff = False
- self.user.save()
-
- res = self.client.get(reverse("queues_home"))
- self.assertEqual(res.status_code, 302)
-
- # 404 code for stats
- res = self.client.get(reverse("queues_home_json"))
- self.assertEqual(res.status_code, 404)
-
- @staticmethod
- def token_validation(token: str) -> bool:
- return token == "valid"
-
- # @patch('scheduler.views.SCHEDULER_CONFIG')
- # def test_statistics_json_view_token(self, configuration):
- # configuration.get.return_value = ViewTest.token_validation
- # self.user.is_staff = False
- # self.user.save()
- # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'valid'})
- # self.assertEqual(res.status_code, 200)
- #
- # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'invalid'})
- # self.assertEqual(res.status_code, 404)
diff --git a/scheduler/tests/test_views/__init__.py b/scheduler/tests/test_views/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/tests/test_views/base.py b/scheduler/tests/test_views/base.py
new file mode 100644
index 0000000..22c2c8b
--- /dev/null
+++ b/scheduler/tests/test_views/base.py
@@ -0,0 +1,14 @@
+from django.contrib.auth.models import User
+from django.test import TestCase
+from django.test.client import Client
+
+from scheduler.helpers.queues import get_queue
+from scheduler.tests import test_settings # noqa
+
+
+class BaseTestCase(TestCase):
+ def setUp(self):
+ self.user = User.objects.create_superuser("user", password="pass")
+ self.client = Client()
+ self.client.login(username=self.user.username, password="pass")
+ get_queue("django_tasks_scheduler_test").connection.flushall()
diff --git a/scheduler/tests/test_views/test_job_details.py b/scheduler/tests/test_views/test_job_details.py
new file mode 100644
index 0000000..e058b1f
--- /dev/null
+++ b/scheduler/tests/test_views/test_job_details.py
@@ -0,0 +1,189 @@
+import uuid
+from datetime import datetime
+from unittest.mock import patch, PropertyMock
+
+from django.urls import reverse
+
+from scheduler._config_types import QueueConfiguration
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.tools import create_worker, TaskType
+from scheduler.redis_models import JobModel, WorkerModel
+from scheduler.tests import test_settings # noqa
+from scheduler.tests.jobs import failing_job, test_job
+from scheduler.tests.test_views.base import BaseTestCase
+from scheduler.tests.testtools import assert_message_in_response, task_factory, _get_task_job_execution_from_registry
+
+
+class TestViewJobDetails(BaseTestCase):
+
+ def test_job_details(self):
+ """Job data is displayed properly"""
+ queue = get_queue("default")
+ job = queue.enqueue_call(test_job)
+
+ url = reverse("job_details", args=[job.name])
+ res = self.client.get(url)
+ self.assertEqual(200, res.status_code)
+ self.assertIn("job", res.context)
+ self.assertEqual(res.context["job"], job)
+
+ # Bad job-id should return 404
+ url = reverse("job_details", args=["bad_job_id"])
+ res = self.client.get(url)
+ self.assertEqual(400, res.status_code)
+
+ def test_scheduled_job_details(self):
+ """Job data is displayed properly"""
+ scheduled_job = task_factory(TaskType.ONCE, enabled=True)
+ job = _get_task_job_execution_from_registry(scheduled_job)
+
+ url = reverse(
+ "job_details",
+ args=[
+ job.name,
+ ],
+ )
+ res = self.client.get(url, follow=True)
+ self.assertIn("job", res.context)
+ self.assertEqual(res.context["job"], job)
+
+ def test_job_details_on_deleted_dependency(self):
+ """Page doesn't crash even if job.dependency has been deleted"""
+ queue = get_queue("default")
+
+ job = queue.enqueue_call(test_job)
+ second_job = queue.enqueue_call(test_job)
+ queue.delete_job(job.name)
+ url = reverse("job_details", args=[second_job.name])
+ res = self.client.get(url)
+ self.assertEqual(res.status_code, 200)
+
+ def test_requeue_all(self):
+ """Ensure that re-queuing all failed job work properly"""
+ queue = get_queue("default")
+ queue_name = "default"
+ queue.enqueue_call(failing_job)
+ queue.enqueue_call(failing_job)
+ worker = create_worker("default")
+ worker.work(burst=True)
+
+ res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(res.context["total_jobs"], 2)
+ # After requeue_all is called, jobs are enqueued
+ res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(len(queue), 4)
+
+ def test_requeue_all_if_deleted_job(self):
+ """
+        Ensure that re-queuing all failed jobs works properly when one of them has been deleted
+ """
+ queue = get_queue("default")
+ queue_name = "default"
+ job = queue.enqueue_call(failing_job)
+ queue.enqueue_call(failing_job)
+ worker = create_worker("default")
+ worker.work(burst=True)
+
+ res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(res.context["total_jobs"], 2)
+ queue.delete_job(job.name)
+
+ # After requeue_all is called, jobs are enqueued
+ res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(len(queue.queued_job_registry), 1)
+
+ def test_clear_queue_unknown_registry(self):
+ queue_name = "django_tasks_scheduler_test"
+ res = self.client.post(reverse("queue_clear", args=[queue_name, "unknown"]), {"post": "yes"})
+ self.assertEqual(404, res.status_code)
+
+ def test_clear_queue_enqueued(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.enqueue_call(test_job)
+ self.client.post(reverse("queue_clear", args=[queue.name, "queued"]), {"post": "yes"})
+ self.assertFalse(JobModel.exists(job.name, connection=queue.connection))
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_clear_queue_scheduled(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.enqueue_at(datetime.now(), test_job)
+
+ res = self.client.get(reverse("queue_clear", args=[queue.name, "scheduled"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ self.assertEqual(res.context["jobs"], [job])
+
+ res = self.client.post(reverse("queue_clear", args=[queue.name, "scheduled"]), {"post": "yes"}, follow=True)
+ assert_message_in_response(res, f"You have successfully cleared the scheduled jobs in queue {queue.name}")
+ self.assertEqual(200, res.status_code)
+ self.assertFalse(JobModel.exists(job.name, connection=queue.connection))
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_queue_workers(self):
+ """Worker index page should show workers for a specific queue"""
+ queue_name = "django_tasks_scheduler_test"
+
+ worker1 = create_worker(queue_name)
+ worker1.register_birth()
+ worker2 = create_worker("test3")
+ worker2.register_birth()
+
+ res = self.client.get(reverse("queue_workers", args=[queue_name]))
+ worker1_model = WorkerModel.get(worker1.name, connection=worker1.connection)
+ self.assertEqual(res.context["workers"], [worker1_model])
+
+ def test_worker_details(self):
+ """Worker index page should show workers for a specific queue"""
+
+ worker = create_worker("django_tasks_scheduler_test", name=uuid.uuid4().hex)
+ worker.register_birth()
+
+ url = reverse("worker_details", args=[worker.name])
+ res = self.client.get(url)
+ self.assertEqual(res.context["worker"], worker._model)
+
+ def test_worker_details__non_existing_worker(self):
+ """Worker index page should show workers for a specific queue"""
+
+ worker = create_worker("django_tasks_scheduler_test", name="WORKER")
+ worker.register_birth()
+
+ res = self.client.get(reverse("worker_details", args=["bad-worker-name"]))
+ self.assertEqual(404, res.status_code)
+
+ def test_statistics_json_view(self):
+ # Override testing SCHEDULER_QUEUES
+ queues = {
+ "default": QueueConfiguration(DB=0, HOST="localhost", PORT=6379),
+ }
+ with patch("scheduler.settings._QUEUES", new_callable=PropertyMock(return_value=queues)):
+ res = self.client.get(reverse("queues_home"))
+ self.assertEqual(res.status_code, 200)
+
+ res = self.client.get(reverse("queues_home_json"))
+ self.assertEqual(res.status_code, 200)
+
+ # Not staff => return 404
+ self.user.is_staff = False
+ self.user.save()
+
+ res = self.client.get(reverse("queues_home"))
+ self.assertEqual(res.status_code, 302)
+
+ # 404 code for stats
+ res = self.client.get(reverse("queues_home_json"))
+ self.assertEqual(res.status_code, 404)
+
+ @staticmethod
+ def token_validation(token: str) -> bool:
+ return token == "valid"
+
+ # @patch('scheduler.views.SCHEDULER_CONFIG')
+ # def test_statistics_json_view_token(self, configuration):
+ # configuration.get.return_value = ViewTest.token_validation
+ # self.user.is_staff = False
+ # self.user.save()
+ # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'valid'})
+ # self.assertEqual(res.status_code, 200)
+ #
+ # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'invalid'})
+ # self.assertEqual(res.status_code, 404)
diff --git a/scheduler/tests/test_views/test_queue_actions.py b/scheduler/tests/test_views/test_queue_actions.py
new file mode 100644
index 0000000..ae2a0e1
--- /dev/null
+++ b/scheduler/tests/test_views/test_queue_actions.py
@@ -0,0 +1,124 @@
+from django.urls import reverse
+
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.tools import create_worker
+from scheduler.redis_models import JobStatus, JobModel
+from scheduler.tests.jobs import failing_job, test_job
+from scheduler.tests.test_views.base import BaseTestCase
+from scheduler.tests.testtools import assert_message_in_response
+
+
+class JobListActionViewsTest(BaseTestCase):
+ def test_job_list_action_delete_jobs__with_bad_next_url(self):
+ queue = get_queue("django_tasks_scheduler_test")
+
+ # enqueue some jobs
+        job_names = []
+        for _ in range(0, 3):
+            job = queue.enqueue_call(test_job)
+            job_names.append(job.name)
+
+ # remove those jobs using view
+ res = self.client.post(
+ reverse(
+ "queue_actions",
+ args=[
+ queue.name,
+ ],
+ ),
+ {
+ "action": "delete",
+ "job_names": job_ids,
+ "next_url": "bad_url",
+ },
+ follow=True,
+ )
+ assert_message_in_response(res, "Bad followup URL")
+ # check if jobs are removed
+ self.assertEqual(200, res.status_code)
+        for job_name in job_names:
+            self.assertFalse(JobModel.exists(job_name, connection=queue.connection))
+            self.assertNotIn(job_name, queue.queued_job_registry.all())
+
+ def test_job_list_action_delete_jobs(self):
+ queue = get_queue("django_tasks_scheduler_test")
+
+ # enqueue some jobs
+        job_names = []
+        for _ in range(0, 3):
+            job = queue.enqueue_call(test_job)
+            job_names.append(job.name)
+
+ # remove those jobs using view
+ res = self.client.post(
+ reverse(
+ "queue_actions",
+ args=[
+ queue.name,
+ ],
+ ),
+ {"action": "delete", "job_names": job_ids},
+ follow=True,
+ )
+
+ # check if jobs are removed
+ self.assertEqual(200, res.status_code)
+        for job_name in job_names:
+            self.assertFalse(JobModel.exists(job_name, connection=queue.connection))
+            self.assertNotIn(job_name, queue.queued_job_registry.all())
+
+ def test_job_list_action_requeue_jobs(self):
+ queue_name = "django_tasks_scheduler_test"
+ queue = get_queue(queue_name)
+
+ # enqueue some jobs that will fail
+ job_names = []
+ for _ in range(0, 3):
+ job = queue.enqueue_call(failing_job)
+ job_names.append(job.name)
+
+        # run those jobs so they fail
+ worker = create_worker(queue_name)
+ worker.work(burst=True)
+
+ # check if all jobs are really failed
+ for job_name in job_names:
+ job = JobModel.get(job_name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+
+        # re-enqueue failed jobs from the failed queue
+ self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "requeue", "job_names": job_names})
+
+        # check that all failed jobs were requeued
+ for job_name in job_names:
+ job = JobModel.get(job_name, connection=queue.connection)
+ self.assertFalse(job.is_failed)
+
+ def test_job_list_action_stop_jobs(self):
+ queue_name = "django_tasks_scheduler_test"
+ queue = get_queue(queue_name)
+
+ # Enqueue some jobs
+ job_names = []
+ worker = create_worker(queue_name)
+ worker.bootstrap()
+ for _ in range(3):
+ job = queue.enqueue_call(test_job)
+ job_names.append(job.name)
+ worker.prepare_job_execution(job)
+
+ # Check if the jobs are started
+ for job_name in job_names:
+ job = JobModel.get(job_name, connection=queue.connection)
+ self.assertEqual(job.status, JobStatus.STARTED)
+
+ # Stop those jobs using the view
+ self.assertEqual(len(queue.started_job_registry), len(job_names))
+ self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "stop", "job_names": job_names})
+ self.assertEqual(len(queue.started_job_registry), 0)
+
+ canceled_job_registry = queue.canceled_job_registry
+ self.assertEqual(len(canceled_job_registry), len(job_names))
+
+ for job_name in job_names:
+ self.assertIn(job_name, canceled_job_registry.all())
diff --git a/scheduler/tests/test_views/test_queue_job_action.py b/scheduler/tests/test_views/test_queue_job_action.py
new file mode 100644
index 0000000..6f571fa
--- /dev/null
+++ b/scheduler/tests/test_views/test_queue_job_action.py
@@ -0,0 +1,96 @@
+from django.urls import reverse
+
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.tools import create_worker
+from scheduler.redis_models import JobStatus, JobModel
+from scheduler.tests.jobs import failing_job, long_job, test_job
+from scheduler.tests.testtools import assert_message_in_response
+from .base import BaseTestCase
+
+
+class SingleJobActionViewsTest(BaseTestCase):
+
+ def test_single_job_action_unknown_job(self):
+ res = self.client.get(reverse("queue_job_action", args=["unknown", "cancel"]), follow=True)
+ self.assertEqual(400, res.status_code)
+
+ def test_single_job_action_unknown_action(self):
+ queue = get_queue("default")
+ job = queue.enqueue_call(failing_job)
+ worker = create_worker("default")
+ worker.work(burst=True)
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "unknown"]), follow=True)
+ self.assertEqual(404, res.status_code)
+
+ def test_single_job_action_requeue_job(self):
+ queue = get_queue("default")
+ job = queue.enqueue_call(failing_job)
+ worker = create_worker("default")
+ worker.work(burst=True)
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "requeue"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ self.client.post(reverse("queue_job_action", args=[job.name, "requeue"]), {"requeue": "Requeue"}, follow=True)
+ self.assertIn(job, JobModel.get_many(queue.queued_job_registry.all(), queue.connection))
+ queue.delete_job(job.name)
+
+ def test_single_job_action_delete_job(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.enqueue_call(test_job)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "delete"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ self.client.post(reverse("queue_job_action", args=[job.name, "delete"]), {"post": "yes"}, follow=True)
+ self.assertFalse(JobModel.exists(job.name, connection=queue.connection))
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_single_job_action_cancel_job(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.enqueue_call(long_job)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "cancel"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ res = self.client.post(reverse("queue_job_action", args=[job.name, "cancel"]), {"post": "yes"}, follow=True)
+ self.assertEqual(200, res.status_code)
+ tmp = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(tmp.is_canceled)
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_single_job_action_cancel_job_that_is_already_cancelled(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.enqueue_call(long_job)
+ res = self.client.post(reverse("queue_job_action", args=[job.name, "cancel"]), {"post": "yes"}, follow=True)
+ self.assertEqual(200, res.status_code)
+ tmp = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(tmp.is_canceled)
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+ res = self.client.post(reverse("queue_job_action", args=[job.name, "cancel"]), {"post": "yes"}, follow=True)
+ self.assertEqual(200, res.status_code)
+ assert_message_in_response(res, f"Could not perform action: Cannot cancel already canceled job: {job.name}")
+
+ def test_single_job_action_enqueue_job(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job_list = []
+        # enqueue some jobs
+ previous_job = None
+ for _ in range(0, 3):
+ job = queue.enqueue_call(test_job)
+ job_list.append(job)
+ previous_job = job
+
+        # The last job is queued immediately (it has no dependencies)
+
+ self.assertEqual(job_list[-1].get_status(connection=queue.connection), JobStatus.QUEUED)
+ self.assertIsNotNone(job_list[-1].enqueued_at)
+
+        # Trying to force-enqueue the last job should do nothing
+ res = self.client.get(reverse("queue_job_action", args=[job_list[-1].name, "enqueue"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ res = self.client.post(reverse("queue_job_action", args=[job_list[-1].name, "enqueue"]), follow=True)
+
+        # Check that the job is still queued
+ self.assertEqual(200, res.status_code)
+ tmp = JobModel.get(job_list[-1].name, connection=queue.connection)
+ self.assertEqual(tmp.get_status(connection=queue.connection), JobStatus.QUEUED)
+ self.assertIsNotNone(tmp.enqueued_at)
diff --git a/scheduler/tests/test_views/test_queue_registry_jobs.py b/scheduler/tests/test_views/test_queue_registry_jobs.py
new file mode 100644
index 0000000..69bd9f1
--- /dev/null
+++ b/scheduler/tests/test_views/test_queue_registry_jobs.py
@@ -0,0 +1,92 @@
+import time
+from datetime import datetime
+
+from django.urls import reverse
+
+from scheduler.helpers.queues import get_queue
+from scheduler.tests.jobs import test_job
+from scheduler.tests.test_views.base import BaseTestCase
+
+
+class QueueRegistryJobsViewTest(BaseTestCase):
+ def test_queue_jobs_unknown_registry(self):
+ queue_name = "default"
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "unknown"]), follow=True)
+ self.assertEqual(404, res.status_code)
+
+ def test_queue_jobs_unknown_queue(self):
+ res = self.client.get(reverse("queue_registry_jobs", args=["UNKNOWN", "queued"]))
+ self.assertEqual(404, res.status_code)
+
+ def test_queued_jobs(self):
+ """Jobs in queue are displayed properly"""
+ queue = get_queue("default")
+ job = queue.enqueue_call(test_job)
+ queue_name = "default"
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "queued"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_finished_jobs(self):
+ """Ensure that finished jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ job = queue.enqueue_call(test_job)
+ registry = queue.finished_job_registry
+ registry.add(queue.connection, job.name, time.time() + 2)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "finished"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_failed_jobs(self):
+ """Ensure that failed jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ # Test that page doesn't fail when FailedJobRegistry is empty
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
+ self.assertEqual(res.status_code, 200)
+
+ job = queue.enqueue_call(test_job)
+ registry = queue.failed_job_registry
+ registry.add(queue.connection, job.name, time.time() + 20)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_scheduled_jobs(self):
+ """Ensure that scheduled jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ # Test that page doesn't fail when ScheduledJobRegistry is empty
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
+ self.assertEqual(res.status_code, 200)
+
+ job = queue.enqueue_at(datetime.now(), test_job)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_scheduled_jobs_registry_removal(self):
+        """Ensure that a job that no longer exists is removed from the registry by the view"""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ registry = queue.scheduled_job_registry
+ job = queue.enqueue_at(datetime.now(), test_job)
+ self.assertEqual(len(registry), 1)
+
+ queue.delete_job(job.name)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
+ self.assertEqual(res.context["jobs"], [])
+
+ self.assertEqual(len(registry), 0)
+
+ def test_started_jobs(self):
+ """Ensure that active jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ job = queue.enqueue_call(test_job)
+ registry = queue.started_job_registry
+ registry.add(queue.connection, job.name, time.time() + 20)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "started"]))
+ self.assertEqual(res.context["jobs"], [job])
diff --git a/scheduler/tests/test_views/test_workers_view.py b/scheduler/tests/test_views/test_workers_view.py
new file mode 100644
index 0000000..26cf8af
--- /dev/null
+++ b/scheduler/tests/test_views/test_workers_view.py
@@ -0,0 +1,19 @@
+from django.urls import reverse
+
+from scheduler.helpers.tools import create_worker
+from scheduler.tests import test_settings # noqa
+from scheduler.tests.test_views.base import BaseTestCase
+
+
+class TestViewWorkers(BaseTestCase):
+
+ def test_workers_home(self):
+ res = self.client.get(reverse("workers_home"))
+ prev_workers = res.context["workers"]
+ worker1 = create_worker("django_tasks_scheduler_test")
+ worker1.register_birth()
+ worker2 = create_worker("test3")
+ worker2.register_birth()
+
+ res = self.client.get(reverse("workers_home"))
+ self.assertEqual(res.context["workers"], prev_workers + [worker1._model, worker2._model])
diff --git a/scheduler/tests/test_worker/__init__.py b/scheduler/tests/test_worker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/tests/test_worker/test_worker_commands.py b/scheduler/tests/test_worker/test_worker_commands.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/tests/test_worker.py b/scheduler/tests/test_worker/test_worker_creation.py
similarity index 68%
rename from scheduler/tests/test_worker.py
rename to scheduler/tests/test_worker/test_worker_creation.py
index 4b40bfb..0eb01bf 100644
--- a/scheduler/tests/test_worker.py
+++ b/scheduler/tests/test_worker/test_worker_creation.py
@@ -1,13 +1,12 @@
import os
import uuid
-from rq.job import Job
-from scheduler.rq_classes import JobExecution
-from scheduler.tests.testtools import SchedulerBaseCase
-from scheduler.tools import create_worker
-from . import test_settings # noqa
-from .. import settings
+from scheduler.helpers.tools import create_worker
+from scheduler.tests import test_settings # noqa
+from scheduler.tests.testtools import SchedulerBaseCase
+from scheduler import settings
class TestWorker(SchedulerBaseCase):
def test_create_worker__two_workers_same_queue(self):
@@ -40,13 +39,3 @@ def test_create_worker__scheduler_interval(self):
worker.work(burst=True)
self.assertEqual(worker.scheduler.interval, 1)
settings.SCHEDULER_CONFIG.SCHEDULER_INTERVAL = prev
-
- def test_get_worker_with_custom_job_class(self):
- # Test with string representation of job_class
- worker = create_worker("default", job_class="scheduler.rq_classes.JobExecution")
- self.assertTrue(issubclass(worker.job_class, Job))
- self.assertTrue(issubclass(worker.job_class, JobExecution))
-
- def test_get_worker_without_custom_job_class(self):
- worker = create_worker("default")
- self.assertTrue(issubclass(worker.job_class, JobExecution))
diff --git a/scheduler/tests/testtools.py b/scheduler/tests/testtools.py
index 0be3586..0f71f57 100644
--- a/scheduler/tests/testtools.py
+++ b/scheduler/tests/testtools.py
@@ -1,4 +1,5 @@
from datetime import timedelta
+from typing import List
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
@@ -9,8 +10,9 @@
from scheduler import settings
from scheduler.models.args import TaskKwarg
from scheduler.models.task import Task
-from scheduler.queues import get_queue
-from scheduler.tools import TaskType
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
+from scheduler.helpers.tools import TaskType
def assert_message_in_response(response, message):
@@ -34,7 +36,7 @@ def task_factory(
values = dict(
name="Scheduled Job %d" % next(seq),
job_id=None,
- queue=list(settings.QUEUES.keys())[0],
+ queue=list(settings._QUEUES.keys())[0],
callable=callable_name,
enabled=True,
timeout=None,
@@ -88,17 +90,16 @@ def taskarg_factory(cls, **kwargs):
return instance
-def _get_task_job_execution_from_registry(django_task: Task):
- jobs_to_schedule = django_task.rqueue.scheduled_job_registry.get_job_ids()
+def _get_task_job_execution_from_registry(django_task: Task) -> JobModel:
+ jobs_to_schedule = django_task.rqueue.scheduled_job_registry.all()
entry = next(i for i in jobs_to_schedule if i == django_task.job_id)
- return django_task.rqueue.fetch_job(entry)
+ return JobModel.get(entry, connection=django_task.rqueue.connection)
def _get_executions(django_job: Task):
job_ids = django_job.rqueue.get_all_job_ids()
- return list(
- filter(lambda j: j.is_execution_of(django_job), map(lambda jid: django_job.rqueue.fetch_job(jid), job_ids))
- )
+ job_list: List[JobModel] = JobModel.get_many(job_ids, connection=django_job.rqueue.connection)
+ return list(filter(lambda j: j.is_execution_of(django_job), job_list))
class SchedulerBaseCase(TestCase):
diff --git a/scheduler/views.py b/scheduler/views.py
index 3394764..7184c79 100644
--- a/scheduler/views.py
+++ b/scheduler/views.py
@@ -1,6 +1,7 @@
+import dataclasses
from html import escape
from math import ceil
-from typing import Tuple, Optional
+from typing import Tuple, Optional, List, Union, Dict, Any
from django.contrib import admin, messages
from django.contrib.admin.views.decorators import staff_member_required
@@ -12,14 +13,18 @@
from django.urls import reverse, resolve
from django.views.decorators.cache import never_cache
+from scheduler.helpers.queues import Queue, InvalidJobOperation
+from scheduler.helpers.queues import get_all_workers, get_queue as get_queue_base
from .broker_types import ConnectionErrorTypes, ResponseErrorTypes
-from .queues import get_all_workers, get_connection, QueueNotFoundError
-from .queues import get_queue as get_queue_base
-from .rq_classes import JobExecution, DjangoWorker, DjangoQueue, InvalidJobOperation
-from .settings import SCHEDULER_CONFIG, logger
+from .redis_models import Result, WorkerModel
+from .redis_models.job import JobModel
+from .redis_models.registry.base_registry import JobNamesRegistry
+from .settings import SCHEDULER_CONFIG, logger, get_queue_names, QueueNotFoundError
+from .worker.commands import StopJobCommand
+from .worker.commands import send_command
-def get_queue(queue_name: str) -> DjangoQueue:
+def get_queue(queue_name: str) -> Queue:
try:
return get_queue_base(queue_name)
except QueueNotFoundError as e:
@@ -27,24 +32,17 @@ def get_queue(queue_name: str) -> DjangoQueue:
raise Http404(e)
-def get_worker_executions(worker):
+def get_worker_executions(worker: WorkerModel) -> List[JobModel]:
res = list()
- for queue in worker.queues:
+ for queue_name in worker.queue_names:
+ queue = get_queue(queue_name)
curr_jobs = queue.get_all_jobs()
curr_jobs = [j for j in curr_jobs if j.worker_name == worker.name]
res.extend(curr_jobs)
return res
-# Create your views here.
-@never_cache
-@staff_member_required
-def stats(request):
- context_data = {**admin.site.each_context(request), **get_statistics(run_maintenance_tasks=True)}
- return render(request, "admin/scheduler/stats.html", context_data)
-
-
-def stats_json(request):
+def stats_json(request: HttpRequest) -> Union[JsonResponse, HttpResponseNotFound]:
auth_token = request.headers.get("Authorization")
token_validation_func = SCHEDULER_CONFIG.TOKEN_VALIDATION_METHOD
if request.user.is_staff or (token_validation_func and auth_token and token_validation_func(auth_token)):
@@ -53,19 +51,44 @@ def stats_json(request):
return HttpResponseNotFound()
-def get_statistics(run_maintenance_tasks=False):
- from scheduler.settings import QUEUES
+# Create your views here.
+@never_cache
+@staff_member_required
+def stats(request: HttpRequest) -> HttpResponse:
+ context_data = {**admin.site.each_context(request), **get_statistics(run_maintenance_tasks=True)}
+ return render(request, "admin/scheduler/stats.html", context_data)
- queues = []
- if run_maintenance_tasks:
- workers = get_all_workers()
- for worker in workers:
- worker.clean_registries()
- for queue_name in QUEUES:
+
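+# Snapshot of a single queue's state, rendered by the stats views.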
+@dataclasses.dataclass
+class QueueData:
+ name: str
+ jobs: int
+ oldest_job_timestamp: str
+ connection_kwargs: dict
+ scheduler_pid: int
+ workers: int
+ finished_jobs: int
+ started_jobs: int
+ failed_jobs: int
+ scheduled_jobs: int
+ canceled_jobs: int
+
+
+def get_statistics(run_maintenance_tasks: bool = False) -> Dict[str, List[Dict[str, Any]]]:
+ queue_names = get_queue_names()
+ queues: List[QueueData] = []
+ queue_workers_count: Dict[str, int] = {queue_name: 0 for queue_name in queue_names}
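+    # Count how many workers listen on each queue; used below for the per-queue worker count.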
+ workers = get_all_workers()
+ for worker in workers:
+ for queue_name in worker.queue_names:
+ if queue_name not in queue_workers_count:
+                logger.warning(f"Worker {worker.name} is listening on queue {queue_name}, which is not in the configured queues")
+ queue_workers_count[queue_name] = 0
+ queue_workers_count[queue_name] += 1
+ for queue_name in queue_names:
try:
queue = get_queue(queue_name)
- connection = get_connection(QUEUES[queue_name])
- connection_kwargs = connection.connection_pool.connection_kwargs
+ connection_kwargs = queue.connection.connection_pool.connection_kwargs
if run_maintenance_tasks:
queue.clean_registries()
@@ -75,10 +98,10 @@ def get_statistics(run_maintenance_tasks=False):
# with `at_front` parameters.
# Ideally rq should supports Queue.oldest_job
- last_job_id = queue.last_job_id()
- last_job = queue.fetch_job(last_job_id.decode("utf-8")) if last_job_id else None
+ last_job_name = queue.first_queued_job_name()
+ last_job = JobModel.get(last_job_name, connection=queue.connection) if last_job_name else None
if last_job and last_job.enqueued_at:
- oldest_job_timestamp = last_job.enqueued_at.strftime("%Y-%m-%d, %H:%M:%S")
+ oldest_job_timestamp = last_job.enqueued_at.isoformat()
else:
oldest_job_timestamp = "-"
@@ -86,16 +109,15 @@ def get_statistics(run_maintenance_tasks=False):
connection_kwargs.pop("parser_class", None)
connection_kwargs.pop("connection_pool", None)
- queue_data = dict(
+ queue_data = QueueData(
name=queue.name,
jobs=queue.count,
oldest_job_timestamp=oldest_job_timestamp,
connection_kwargs=connection_kwargs,
scheduler_pid=queue.scheduler_pid,
- workers=DjangoWorker.count(queue=queue),
+ workers=queue_workers_count[queue.name],
finished_jobs=len(queue.finished_job_registry),
started_jobs=len(queue.started_job_registry),
- deferred_jobs=len(queue.deferred_job_registry),
failed_jobs=len(queue.failed_job_registry),
scheduled_jobs=len(queue.scheduled_job_registry),
canceled_jobs=len(queue.canceled_job_registry),
@@ -105,34 +127,36 @@ def get_statistics(run_maintenance_tasks=False):
logger.error(f"Could not connect for queue {queue_name}: {e}")
continue
- return {"queues": queues}
+ return {"queues": [dataclasses.asdict(q) for q in queues]}
-def _get_registry_job_list(queue, registry, page):
+def _get_registry_job_list(
+ queue: Queue, registry: JobNamesRegistry, page: int
+) -> Tuple[List[JobModel], int, range]:
items_per_page = SCHEDULER_CONFIG.EXECUTIONS_IN_PAGE
- num_jobs = len(registry)
- job_list = []
+ num_jobs = registry.count(queue.connection)
+ job_list = list()
if num_jobs == 0:
- return job_list, num_jobs, []
+ return job_list, num_jobs, range(1, 1)
last_page = int(ceil(num_jobs / items_per_page))
page_range = range(1, last_page + 1)
offset = items_per_page * (page - 1)
- job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)
- job_list = JobExecution.fetch_many(job_ids, connection=queue.connection)
+ job_ids = registry.all(offset, offset + items_per_page - 1)
+ job_list = JobModel.get_many(job_ids, connection=queue.connection)
remove_job_ids = [job_id for i, job_id in enumerate(job_ids) if job_list[i] is None]
valid_jobs = [job for job in job_list if job is not None]
if registry is not queue:
for job_id in remove_job_ids:
- registry.remove(job_id)
+ registry.delete(queue.connection, job_id)
return valid_jobs, num_jobs, page_range
@never_cache
@staff_member_required
-def jobs_view(request, queue_name: str, registry_name: str):
+def jobs_view(request: HttpRequest, queue_name: str, registry_name: str) -> HttpResponse:
queue = get_queue(queue_name)
registry = queue.get_registry(registry_name)
if registry is None:
@@ -159,10 +183,11 @@ def jobs_view(request, queue_name: str, registry_name: str):
@staff_member_required
def queue_workers(request: HttpRequest, queue_name: str) -> HttpResponse:
queue = get_queue(queue_name)
- all_workers = DjangoWorker.all(queue.connection)
- for w in all_workers:
- w.clean_registries()
- worker_list = [worker for worker in all_workers if queue.name in worker.queue_names()]
+ queue.clean_registries()
+ from .redis_models.worker import WorkerModel
+
+ all_workers = WorkerModel.all(queue.connection)
+ worker_list = [worker for worker in all_workers if queue.name in worker.queue_names]
context_data = {
**admin.site.each_context(request),
@@ -188,14 +213,13 @@ def workers(request: HttpRequest) -> HttpResponse:
@never_cache
@staff_member_required
def worker_details(request: HttpRequest, name: str) -> HttpResponse:
- queue, worker = None, None
+ queue_names = get_queue_names()
+ queue = get_queue(queue_names[0])
workers = get_all_workers()
worker = next((w for w in workers if w.name == name), None)
if worker is None:
raise Http404(f"Couldn't find worker with this ID: {name}")
- # Convert microseconds to milliseconds
- worker.total_working_time = worker.total_working_time / 1000
execution_list = get_worker_executions(worker)
paginator = Paginator(execution_list, SCHEDULER_CONFIG.EXECUTIONS_IN_PAGE)
@@ -206,9 +230,9 @@ def worker_details(request: HttpRequest, name: str) -> HttpResponse:
**admin.site.each_context(request),
"queue": queue,
"worker": worker,
- "queue_names": ", ".join(worker.queue_names()),
- "job": worker.get_current_job(),
- "total_working_time": worker.total_working_time * 1000,
+ "queue_names": ", ".join(worker.queue_names),
+ "job": worker.get_field("current_job_name", queue.connection),
+ "total_working_time": worker.total_working_time,
"executions": page_obj,
"page_range": page_range,
"page_var": "p",
@@ -216,16 +240,16 @@ def worker_details(request: HttpRequest, name: str) -> HttpResponse:
return render(request, "admin/scheduler/worker_details.html", context_data)
-def _find_job(job_id: str) -> Tuple[Optional[DjangoQueue], Optional[JobExecution]]:
- from scheduler.settings import QUEUES
-
- for queue_name in QUEUES:
+def _find_job(job_name: str) -> Tuple[Optional[Queue], Optional[JobModel]]:
+ queue_names = get_queue_names()
+ for queue_name in queue_names:
try:
queue = get_queue(queue_name)
- job = JobExecution.fetch(job_id, connection=queue.connection)
- if job.origin == queue_name:
+ job = JobModel.get(job_name, connection=queue.connection)
+ if job is not None and job.queue_name == queue_name:
return queue, job
- except Exception:
+ except Exception as e:
+ logger.debug(f"Got exception: {e}")
pass
return None, None
@@ -243,14 +267,14 @@ def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:
data_is_valid = False
try:
- exc_info = job._exc_info
+ last_result = Result.fetch_latest(queue.connection, job.name)
+ exc_info = last_result.exc_string
except AttributeError:
exc_info = None
context_data = {
**admin.site.each_context(request),
"job": job,
- "dependency_id": job._dependency_id,
"queue": queue,
"data_is_valid": data_is_valid,
"exc_info": exc_info,
@@ -271,20 +295,19 @@ def clear_queue_registry(request: HttpRequest, queue_name: str, registry_name: s
try:
if registry is queue:
queue.empty()
- else:
- job_ids = registry.get_job_ids()
- for job_id in job_ids:
- registry.remove(job_id, delete_job=True)
+ elif isinstance(registry, JobNamesRegistry):
+ job_names = registry.all()
+ for job_id in job_names:
+ registry.delete(registry.connection, job_id)
+ job_model = JobModel.get(job_id, connection=registry.connection)
+ job_model.delete(connection=registry.connection)
messages.info(request, f"You have successfully cleared the {registry_name} jobs in queue {queue.name}")
except ResponseErrorTypes as e:
- messages.error(
- request,
- f"error: {e}",
- )
+ messages.error(request, f"error: {e}")
raise e
return redirect("queue_registry_jobs", queue_name, registry_name)
- job_ids = registry.get_job_ids()
- job_list = JobExecution.fetch_many(job_ids, connection=queue.connection)
+ job_names = registry.all()
+ job_list = JobModel.get_many(job_names, connection=queue.connection)
context_data = {
**admin.site.each_context(request),
"queue": queue,
@@ -311,29 +334,19 @@ def requeue_all(request: HttpRequest, queue_name: str, registry_name: str) -> Ht
if registry is None:
return HttpResponseNotFound()
next_url = request.META.get("HTTP_REFERER") or reverse("queue_registry_jobs", args=[queue_name, registry_name])
- job_ids = registry.get_job_ids()
+ job_names = registry.all()
if request.method == "POST":
- count = 0
# Confirmation received
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
- for job in jobs:
- if job is None:
- continue
- try:
- job.requeue()
- count += 1
- except Exception:
- pass
-
- messages.info(request, f"You have successfully re-queued {count} jobs!")
+ jobs_requeued_count = queue.requeue_jobs(*job_names)
+ messages.info(request, f"You have successfully re-queued {jobs_requeued_count} jobs!")
return redirect("queue_registry_jobs", queue_name, registry_name)
context_data = {
**admin.site.each_context(request),
"queue": queue,
- "total_jobs": len(queue.failed_job_registry),
+ "total_jobs": queue.count,
"action": "requeue",
- "jobs": [queue.fetch_job(job_id) for job_id in job_ids],
+ "jobs": [JobModel.get(job_id, connection=queue.connection) for job_id in job_names],
"next_url": next_url,
"action_url": reverse("queue_requeue_all", args=[queue_name, registry_name]),
}
@@ -359,7 +372,7 @@ def confirm_action(request: HttpRequest, queue_name: str) -> HttpResponse:
context_data = {
**admin.site.each_context(request),
"action": request.POST["action"],
- "jobs": [queue.fetch_job(job_id) for job_id in job_id_list],
+ "jobs": [JobModel.get(job_id, connection=queue.connection) for job_id in job_id_list],
"total_jobs": len(job_id_list),
"queue": queue,
"next_url": next_url,
@@ -387,35 +400,32 @@ def actions(request: HttpRequest, queue_name: str) -> HttpResponse:
next_url = reverse("queue_registry_jobs", args=[queue_name, "queued"])
action = request.POST.get("action", False)
- job_ids = request.POST.get("job_ids", False)
- if request.method != "POST" or not action or not job_ids:
+ job_names = request.POST.get("job_names", False)
+ if request.method != "POST" or not action or not job_names:
return redirect(next_url)
- job_ids = request.POST.getlist("job_ids")
+ job_names = request.POST.getlist("job_names")
if action == "delete":
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
+ jobs = JobModel.get_many(job_names, connection=queue.connection)
for job in jobs:
if job is None:
continue
# Remove job id from queue and delete the actual job
- queue.remove_job_id(job.id)
- job.delete()
- messages.info(request, f"You have successfully deleted {len(job_ids)} jobs!")
+ queue.delete_job(job.name)
+ messages.info(request, f"You have successfully deleted {len(job_names)} jobs!")
elif action == "requeue":
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
- for job in jobs:
- if job is None:
- continue
- job.requeue()
- messages.info(request, f"You have successfully re-queued {len(job_ids)} jobs!")
+ requeued_jobs_count = queue.requeue_jobs(*job_names)
+ messages.info(request, f"You have successfully re-queued {requeued_jobs_count}/{len(job_names)} jobs!")
elif action == "stop":
cancelled_jobs = 0
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
+ jobs = JobModel.get_many(job_names, connection=queue.connection)
for job in jobs:
if job is None:
continue
try:
- job.stop_execution(queue.connection)
- job.cancel()
+ command = StopJobCommand(job_name=job.name, worker_name=job.worker_name)
+ send_command(connection=queue.connection, command=command)
+
+ queue.cancel_job(job.name)
cancelled_jobs += 1
except Exception as e:
logger.warning(f"Could not stop job: {e}")
@@ -447,23 +457,25 @@ def job_action(request: HttpRequest, job_id: str, action: str) -> HttpResponse:
try:
if action == "requeue":
- job.requeue()
- messages.info(request, f"You have successfully re-queued {job.id}")
+ requeued_jobs_count = queue.requeue_jobs(job.name)
+ if requeued_jobs_count == 0:
+ messages.warning(request, f"Could not requeue {job.name}")
+ else:
+ messages.info(request, f"You have successfully re-queued {job.name}")
return redirect("job_details", job_id)
elif action == "delete":
# Remove job id from queue and delete the actual job
- queue.remove_job_id(job.id)
- job.delete()
- messages.info(request, "You have successfully deleted %s" % job.id)
+ queue.delete_job(job.name)
+ messages.info(request, f"You have successfully deleted {job.name}")
return redirect("queue_registry_jobs", queue.name, "queued")
elif action == "enqueue":
- job.delete(remove_from_queue=False)
+ queue.delete_job(job.name)
queue._enqueue_job(job)
- messages.info(request, "You have successfully enqueued %s" % job.id)
+ messages.info(request, f"You have successfully enqueued {job.name}")
return redirect("job_details", job_id)
elif action == "cancel":
- job.cancel()
- messages.info(request, "You have successfully enqueued %s" % job.id)
+ queue.cancel_job(job.name)
+            messages.info(request, f"You have successfully canceled {job.name}")
return redirect("job_details", job_id)
except InvalidJobOperation as e:
logger.warning(f"Could not perform action: {e}")
diff --git a/scheduler/worker/__init__.py b/scheduler/worker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/worker/commands/__init__.py b/scheduler/worker/commands/__init__.py
new file mode 100644
index 0000000..53d745f
--- /dev/null
+++ b/scheduler/worker/commands/__init__.py
@@ -0,0 +1,13 @@
+__all__ = [
+ "WorkerCommandsChannelListener",
+ "StopJobCommand",
+ "ShutdownCommand",
+ "KillWorkerCommand",
+ "UnknownCommandError",
+ "send_command",
+]
+
+from .kill_worker import KillWorkerCommand
+from .shutdown import ShutdownCommand
+from .stop_job import StopJobCommand
+from .worker_commands import WorkerCommandsChannelListener, UnknownCommandError, send_command
diff --git a/scheduler/worker/commands/kill_worker.py b/scheduler/worker/commands/kill_worker.py
new file mode 100644
index 0000000..c983941
--- /dev/null
+++ b/scheduler/worker/commands/kill_worker.py
@@ -0,0 +1,35 @@
+import errno
+import os
+import signal
+from typing import Optional
+
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models import WorkerModel
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand
+
+
+class KillWorkerCommand(WorkerCommand):
+ """kill-worker command"""
+
+ command_name = "kill-worker"
+
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+ self.worker_pid: Optional[int] = None
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.info("Received kill-worker command.")
+ worker_model = WorkerModel.get(self.worker_name, connection)
+ self.worker_pid = worker_model.pid
+ if self.worker_pid is None:
+ raise ValueError("Worker PID is not set")
+ logger.info(f"Killing job execution process {self.worker_pid}...")
+ try:
+ os.killpg(os.getpgid(self.worker_pid), signal.SIGKILL)
+ logger.info(f"Killed job execution process pid {self.worker_pid}")
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ logger.debug("Job execution process already dead") # "No such process" is fine with us
+ else:
+ raise
diff --git a/scheduler/worker/commands/shutdown.py b/scheduler/worker/commands/shutdown.py
new file mode 100644
index 0000000..1cf81be
--- /dev/null
+++ b/scheduler/worker/commands/shutdown.py
@@ -0,0 +1,17 @@
+import os
+import signal
+
+from scheduler.broker_types import ConnectionType
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand
+
+
+class ShutdownCommand(WorkerCommand):
+ """shutdown command"""
+
+ command_name = "shutdown"
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.info("Received shutdown command, sending SIGINT signal.")
+ pid = os.getpid()
+ os.kill(pid, signal.SIGINT)
diff --git a/scheduler/worker/commands/stop_job.py b/scheduler/worker/commands/stop_job.py
new file mode 100644
index 0000000..23e9b7c
--- /dev/null
+++ b/scheduler/worker/commands/stop_job.py
@@ -0,0 +1,35 @@
+import os
+from signal import SIGKILL
+from typing import Dict, Any
+
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models import WorkerModel
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand
+
+
+class StopJobCommand(WorkerCommand):
+ """stop-job command"""
+
+ command_name = "stop-job"
+
+ def __init__(self, *args, job_name: str, worker_name: str, **kwargs) -> None:
+ super().__init__(*args, worker_name=worker_name, **kwargs)
+ self.job_name = job_name
+ if self.job_name is None:
+ raise ValueError("job_name is required")
+
+ def command_payload(self) -> Dict[str, Any]:
+ return super().command_payload(job_name=self.job_name)
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.debug(f'Received command to stop job {self.job_name}')
+ worker_model = WorkerModel.get(self.worker_name, connection)
+ if worker_model is None:
+ logger.error(f'Worker {self.worker_name} not found')
+ return
+ if worker_model.current_job_name == self.job_name:
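+            # The job runs in its own process group (the worker calls os.setsid() after forking),
+            # so kill the whole group and record which job was stopped on purpose.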
+ os.killpg(os.getpgid(worker_model.job_execution_process_pid), SIGKILL)
+ worker_model.set_field("stopped_job_name", self.job_name, connection)
+ else:
+ logger.info(f"{self.worker_name} not working on job {self.job_name}, command ignored.")
diff --git a/scheduler/worker/commands/suspend_worker.py b/scheduler/worker/commands/suspend_worker.py
new file mode 100644
index 0000000..e3fb39f
--- /dev/null
+++ b/scheduler/worker/commands/suspend_worker.py
@@ -0,0 +1,37 @@
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models import WorkerModel
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand
+
+
+class SuspendWorkCommand(WorkerCommand):
+ """Suspend worker command"""
+
+ command_name = "suspend"
+
+ def process_command(self, connection: ConnectionType) -> None:
+        logger.debug(f"Received command to suspend worker {self.worker_name}")
+ worker_model = WorkerModel.get(self.worker_name, connection)
+ if worker_model is None:
+            logger.warning(f"Worker {self.worker_name} not found")
+            return
+ if worker_model.is_suspended:
+ logger.warning(f"Worker {self.worker_name} already suspended")
+ return
+ worker_model.set_field("is_suspended", True, connection=connection)
+ logger.info(f"Worker {self.worker_name} suspended")
+
+
+class ResumeWorkCommand(WorkerCommand):
+ """Resume worker command"""
+
+ command_name = "resume"
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.debug(f'Received command to resume worker {self.worker_name}')
+ worker_model = WorkerModel.get(self.worker_name, connection)
+ if worker_model is None:
+            logger.warning(f"Worker {self.worker_name} not found")
+            return
+ if not worker_model.is_suspended:
+ logger.warning(f"Worker {self.worker_name} not suspended and therefore can't be resumed")
+ return
+ worker_model.set_field("is_suspended", False, connection=connection)
+ logger.info(f"Worker {self.worker_name} resumed")
\ No newline at end of file
diff --git a/scheduler/worker/commands/worker_commands.py b/scheduler/worker/commands/worker_commands.py
new file mode 100644
index 0000000..0b74bad
--- /dev/null
+++ b/scheduler/worker/commands/worker_commands.py
@@ -0,0 +1,92 @@
+import json
+from abc import ABC
+from datetime import datetime, UTC
+from typing import Self, Type, Dict, Any
+
+from scheduler.broker_types import ConnectionType
+from scheduler.settings import logger
+
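+# Each worker listens on its own pub/sub channel, keyed by the worker name.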
+_PUBSUB_CHANNEL_TEMPLATE: str = ":workers:pubsub:{}"
+
+
+class UnknownCommandError(Exception):
+ pass
+
+
+class WorkerCommand(ABC):
+ """Abstract class for commands to be sent to a worker and processed by worker"""
+
+ _registry: Dict[str, Type[Self]] = dict()
+ command_name: str = ""
+
+ def __init__(self, *args, worker_name: str, **kwargs) -> None:
+ self.worker_name = worker_name
+
+ def command_payload(self, **kwargs) -> Dict[str, Any]:
+ commands_channel = WorkerCommandsChannelListener._commands_channel(self.worker_name)
+ payload = {
+ "command": self.command_name,
+ "worker_name": self.worker_name,
+ "channel_name": commands_channel,
+ "created_at": datetime.now(tz=UTC).isoformat(),
+ }
+ if kwargs:
+ payload.update(kwargs)
+ return payload
+
+ def process_command(self, connection: ConnectionType) -> None:
+ raise NotImplementedError
+
+ @classmethod
+ def __init_subclass__(cls, *args, **kwargs):
+ if cls is WorkerCommand:
+ return
+ if not cls.command_name:
+            raise NotImplementedError(f"{cls.__name__} must define a command_name attribute")
+ WorkerCommand._registry[cls.command_name] = cls
+
+ @classmethod
+    def from_payload(cls, payload: Dict[str, Any]) -> "WorkerCommand":
+ command_name = payload.get("command")
+ command_class = WorkerCommand._registry.get(command_name)
+ if command_class is None:
+ raise UnknownCommandError(f"Invalid command: {command_name}")
+ return command_class(**payload)
+
+
+def send_command(connection: ConnectionType, command: WorkerCommand) -> None:
+ """Send a command to the worker"""
+ payload = command.command_payload()
+ connection.publish(payload["channel_name"], json.dumps(payload))
+
+
+class WorkerCommandsChannelListener(object):
+ def __init__(self, connection: ConnectionType, worker_name: str) -> None:
+        self.connection = connection
+        self.pubsub = None
+        self.pubsub_thread = None
+        self.pubsub_channel_name = WorkerCommandsChannelListener._commands_channel(worker_name)
+
+ @staticmethod
+ def _commands_channel(worker_name: str) -> str:
+ return _PUBSUB_CHANNEL_TEMPLATE.format(worker_name)
+
+ def start(self):
+ """Subscribe to this worker's channel"""
+ logger.info(f"Subscribing to channel {self.pubsub_channel_name}")
+ self.pubsub = self.connection.pubsub()
+ self.pubsub.subscribe(**{self.pubsub_channel_name: self.handle_payload})
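+        # run_in_thread starts a daemon thread that calls handle_payload for every message published to the channel.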
+ self.pubsub_thread = self.pubsub.run_in_thread(sleep_time=0.2, daemon=True)
+
+ def stop(self):
+ """Unsubscribe from pubsub channel"""
+ if self.pubsub_thread:
+ logger.info(f"Unsubscribing from channel {self.pubsub_channel_name}")
+ self.pubsub_thread.stop()
+ self.pubsub_thread.join()
+ self.pubsub.unsubscribe()
+ self.pubsub.close()
+
+ def handle_payload(self, payload: str) -> None:
+ """Handle commands"""
+ logger.debug(f"Received payload: {payload}")
+ command = WorkerCommand.from_payload(json.loads(payload["data"]))
+ command.process_command(self.connection)
diff --git a/scheduler/worker/scheduler.py b/scheduler/worker/scheduler.py
new file mode 100644
index 0000000..5304769
--- /dev/null
+++ b/scheduler/worker/scheduler.py
@@ -0,0 +1,178 @@
+import os
+import signal
+import time
+import traceback
+from datetime import datetime
+from enum import Enum
+from multiprocessing import Process
+from typing import List, Set, Optional, Sequence, Dict
+
+import django
+from django.apps import apps
+
+from scheduler.broker_types import ConnectionType, MODEL_NAMES
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import SchedulerLock, JobModel
+from scheduler.redis_models.registry import ScheduledJobRegistry
+from scheduler.helpers.queues import Queue
+from scheduler.settings import SCHEDULER_CONFIG, logger
+from scheduler.helpers.utils import current_timestamp
+
+
+class SchedulerStatus(str, Enum):
+ STARTED = "started"
+ WORKING = "working"
+ STOPPED = "stopped"
+
+
+def _reschedule_all_jobs():
+ for model_name in MODEL_NAMES:
+ model = apps.get_model(app_label="scheduler", model_name=model_name)
+ enabled_jobs = model.objects.filter(enabled=True)
+ for item in enabled_jobs:
+ logger.debug(f"Rescheduling {str(item)}")
+ item.save()
+
+
+class WorkerScheduler:
+ def __init__(
+ self,
+ queues: Sequence[Queue],
+ connection: ConnectionType,
+ interval: Optional[int] = None,
+ ) -> None:
+ interval = interval or SCHEDULER_CONFIG.SCHEDULER_INTERVAL
+ self._queue_names = {queue.name for queue in queues}
+ self._scheduled_job_registries: List[ScheduledJobRegistry] = []
+ self.lock_acquisition_time = None
+ self._pool_class = connection.connection_pool.connection_class
+ self._pool_kwargs = connection.connection_pool.connection_kwargs.copy()
+ self._locks: Dict[str, SchedulerLock] = dict()
+ self.connection = connection
+ self.interval = interval
+ self._stop_requested = False
+ self._status = SchedulerStatus.STOPPED
+ self._process = None
+ self._pid: Optional[int] = None
+
+ @property
+ def pid(self) -> Optional[int]:
+ return self._pid
+
+ def _should_reacquire_locks(self) -> bool:
+        """Returns True if the locks were acquired more than SCHEDULER_FALLBACK_PERIOD_SECS seconds ago (or never)"""
+ if not self.lock_acquisition_time:
+ return True
+ seconds_since = (datetime.now() - self.lock_acquisition_time).total_seconds()
+ return seconds_since > SCHEDULER_CONFIG.SCHEDULER_FALLBACK_PERIOD_SECS
+
+ def _acquire_locks(self) -> Set[str]:
+        """Returns the names of the queues a scheduling lock was successfully acquired on"""
+ successful_locks = set()
+ pid = os.getpid()
+ logger.debug("Trying to acquire locks for %s", ", ".join(self._queue_names))
+ for queue_name in self._queue_names:
+ lock = SchedulerLock(queue_name)
+            if lock.acquire(pid, connection=self.connection, expire=self.interval + 60):
+                self._locks[queue_name] = lock
+                successful_locks.add(queue_name)
+
+ # Always reset _scheduled_job_registries when acquiring locks
+ self.lock_acquisition_time = datetime.now()
+ self._scheduled_job_registries = []
+ for queue_name in self._locks:
+ queue = get_queue(queue_name)
+ self._scheduled_job_registries.append(queue.scheduled_job_registry)
+
+ return successful_locks
+
+ def start(self, burst=False) -> None:
+ locks = self._acquire_locks()
+ if len(locks) == 0:
+ return
+ if burst:
+ self.enqueue_scheduled_jobs()
+ self.release_locks()
+ return
+ self._status = SchedulerStatus.STARTED
+ self._process = Process(target=run_scheduler, args=(self,), name="Scheduler")
+ self._process.start()
+ self._pid = self._process.pid
+
+ def _install_signal_handlers(self):
+ """Installs signal handlers for handling SIGINT and SIGTERM"""
+ signal.signal(signal.SIGINT, self.request_stop)
+ signal.signal(signal.SIGTERM, self.request_stop)
+
+ def request_stop(self, signum=None, frame=None):
+ """Toggle self._stop_requested that's checked on every loop"""
+ self._stop_requested = True
+
+ def heartbeat(self):
+ """Updates the TTL on scheduler keys and the locks"""
+        lock_keys = ", ".join(self._locks.keys())
+ logger.debug(f"Scheduler sending heartbeat to {lock_keys}")
+ with self.connection.pipeline() as pipeline:
+ for lock in self._locks.values():
+                lock.expire(pipeline, expire=self.interval + 60)
+ pipeline.execute()
+
+ def stop(self):
+ logger.info(f"Stopping scheduler, releasing locks for {', '.join(self._locks.keys())}...")
+ self.release_locks()
+ self._status = SchedulerStatus.STOPPED
+
+ def release_locks(self):
+ """Release acquired locks"""
+ with self.connection.pipeline() as pipeline:
+ for lock in self._locks.values():
+                lock.release(pipeline)
+ pipeline.execute()
+
+ def work(self) -> None:
+ logger.info(f"""Scheduler for {", ".join(self._queue_names)} started with PID {os.getpid()}""")
+ django.setup()
+ self._install_signal_handlers()
+
+ while True:
+ if self._stop_requested:
+ self.stop()
+ break
+
+ if self._should_reacquire_locks():
+ self._acquire_locks()
+
+ self.enqueue_scheduled_jobs()
+ self.heartbeat()
+ time.sleep(self.interval)
+
+ def enqueue_scheduled_jobs(self) -> None:
+ """Enqueue jobs whose timestamp is in the past"""
+ self._status = SchedulerStatus.WORKING
+ _reschedule_all_jobs()
+
+ for registry in self._scheduled_job_registries:
+ timestamp = current_timestamp()
+ job_ids = registry.get_jobs_to_schedule(timestamp)
+
+ if not job_ids:
+ continue
+
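+            # The registry is keyed by queue name, so resolve the queue that owns this registry.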
+ queue = get_queue(registry.name)
+
+ with self.connection.pipeline() as pipeline:
+ jobs = JobModel.get_many(job_ids, connection=self.connection)
+ for job in jobs:
+ if job is not None:
+ queue._enqueue_job(job, connection=pipeline, at_front=bool(job.at_front))
+ registry.delete(pipeline, job.name)
+ pipeline.execute()
+ self._status = SchedulerStatus.STARTED
+
+
+def run_scheduler(scheduler):
+ try:
+ scheduler.work()
+ except: # noqa
+ logger.error(f"Scheduler [PID {os.getpid()}] raised an exception.\n{traceback.format_exc()}")
+ raise
+    logger.info(f"Scheduler with PID {os.getpid()} has stopped")
diff --git a/scheduler/worker/worker.py b/scheduler/worker/worker.py
new file mode 100644
index 0000000..1a995c6
--- /dev/null
+++ b/scheduler/worker/worker.py
@@ -0,0 +1,949 @@
+import contextlib
+import errno
+import math
+import os
+import random
+import signal
+import socket
+import sys
+import time
+import traceback
+import warnings
+from datetime import timedelta
+from enum import Enum
+from random import shuffle
+from types import FrameType
+from typing import Callable, List, Optional, Tuple, Any, Iterable
+from uuid import uuid4
+
+import scheduler
+from scheduler.broker_types import (ConnectionType, TimeoutErrorType, ConnectionErrorTypes, WatchErrorType,
+ ResponseErrorTypes)
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import WorkerModel, JobModel, JobStatus, KvLock, DequeueTimeout
+from scheduler.settings import SCHEDULER_CONFIG, logger
+from .commands import WorkerCommandsChannelListener
+from .scheduler import WorkerScheduler
+from ..redis_models.worker import WorkerStatus
+
+try:
+ from signal import SIGKILL
+except ImportError:
+ from signal import SIGTERM as SIGKILL
+
+from contextlib import suppress
+
+from scheduler.helpers.queues import Queue, perform_job
+from scheduler.helpers.timeouts import (
+ JobExecutionMonitorTimeoutException,
+ JobTimeoutException,
+)
+from scheduler.helpers.utils import utcnow, current_timestamp
+
+try:
+ from setproctitle import setproctitle as setprocname
+except ImportError:
+
+ def setprocname(*args, **kwargs): # noqa
+ pass
+
+
+class StopRequested(Exception):
+ pass
+
+
+_signames = dict(
+ (getattr(signal, signame), signame) for signame in dir(signal) if signame.startswith("SIG") and "_" not in signame
+)
+
+
+def signal_name(signum):
+ try:
+ return signal.Signals(signum).name
+ except KeyError:
+ return "SIG_UNKNOWN"
+ except ValueError:
+ return "SIG_UNKNOWN"
+
+
+class DequeueStrategy(str, Enum):
+ DEFAULT = "default"
+ ROUND_ROBIN = "round_robin"
+ RANDOM = "random"
+
+
+class QueueLock(KvLock):
+ def __init__(self, queue_name: str) -> None:
+ super().__init__(f"_lock:queue:{queue_name}")
+
+
+class Worker:
+ queue_class = Queue
+
+ # factor to increase connection_wait_time in case of continuous connection failures.
+ exponential_backoff_factor = 2.0
+ # Max Wait time (in seconds) after which exponential_backoff_factor won't be applicable.
+ max_connection_wait_time = 60.0
+
+ def __init__(
+ self,
+ queues,
+ name: Optional[str] = None,
+ connection: Optional[ConnectionType] = None,
+ exception_handlers=None,
+ maintenance_interval: int = SCHEDULER_CONFIG.DEFAULT_MAINTENANCE_TASK_INTERVAL,
+ job_monitoring_interval=SCHEDULER_CONFIG.DEFAULT_JOB_MONITORING_INTERVAL,
+ dequeue_strategy: DequeueStrategy = DequeueStrategy.DEFAULT,
+ disable_default_exception_handler: bool = False,
+ fork_job_execution: bool = True,
+ ): # noqa
+ self.fork_job_execution = fork_job_execution
+ self.job_monitoring_interval = job_monitoring_interval
+ self.maintenance_interval = maintenance_interval
+
+ connection = self._set_connection(connection)
+ self.connection = connection
+
+ self.version = scheduler.__version__
+ self.python_version = sys.version
+
+ self.queues = [
+ (Queue(name=q, connection=connection) if isinstance(q, str) else q)
+ for q in _ensure_list(queues)
+ ]
+ self._model: WorkerModel
+ self.name: str = name or uuid4().hex
+ self._ordered_queues = self.queues[:]
+ self._exc_handlers: List[Callable] = []
+
+ self._is_job_execution_process: bool = False
+ self._stop_requested: bool = False
+
+ self.scheduler: Optional[WorkerScheduler] = None
+ self._command_listener = WorkerCommandsChannelListener(connection, self.name)
+ self._dequeue_strategy = dequeue_strategy
+
+ self.disable_default_exception_handler = disable_default_exception_handler
+ self.hostname: Optional[str] = socket.gethostname()
+ self.pid: Optional[int] = os.getpid()
+ self.ip_address = _get_ip_address_from_connection(self.connection, self.name)
+
+ if isinstance(exception_handlers, (list, tuple)):
+ for handler in exception_handlers:
+ self.push_exc_handler(handler)
+ elif exception_handlers is not None:
+ self.push_exc_handler(exception_handlers)
+
+ @property
+ def should_run_maintenance_tasks(self):
+        """Maintenance tasks should run on the first startup or when the last run was more than `maintenance_interval` seconds ago."""
+ if self._model.last_cleaned_at is None:
+ return True
+ if (utcnow() - self._model.last_cleaned_at) > timedelta(seconds=self.maintenance_interval):
+ return True
+ return False
+
+ def _set_connection(self, connection: ConnectionType) -> ConnectionType:
+ """Configures the Broker connection to have a socket timeout.
+        This should time out the connection in case any specific command hangs (e.g. BLPOP).
+        If the connection provided already has a `socket_timeout` defined, it is left unchanged.
+
+ :param connection: Broker connection to configure.
+ """
+ current_socket_timeout = connection.connection_pool.connection_kwargs.get("socket_timeout")
+ if current_socket_timeout is None:
+ timeout_config = {"socket_timeout": self.connection_timeout}
+ connection.connection_pool.connection_kwargs.update(timeout_config)
+ return connection
+
+ def clean_registries(self):
+ """Runs maintenance jobs on each Queue's registries."""
+ for queue in self.queues:
+ # If there are multiple workers running, we only want 1 worker
+ # to run clean_registries().
+            queue_lock = QueueLock(queue.name)
+ if queue_lock.acquire(1, expire=899, connection=self.connection):
+ logger.info(f"Cleaning registries for queue: {queue.name}")
+ queue.clean_registries()
+ WorkerModel.cleanup(queue.connection, queue.name)
+ queue_lock.release(self.connection)
+ self._model.last_cleaned_at = utcnow()
+
+ def _install_signal_handlers(self):
+ """Installs signal handlers for handling SIGINT and SIGTERM gracefully."""
+ signal.signal(signal.SIGINT, self.request_stop)
+ signal.signal(signal.SIGTERM, self.request_stop)
+
+ def work(
+ self,
+ burst: bool = False,
+ max_jobs: Optional[int] = None,
+ max_idle_time: Optional[int] = None,
+ with_scheduler: bool = True,
+ ) -> bool:
+ """Starts the work loop.
+
+ Pops and performs all jobs on the current list of queues. When all
+ queues are empty, block and wait for new jobs to arrive on any of the
+ queues, unless `burst` mode is enabled.
+ If `max_idle_time` is provided, worker will die when it's idle for more than the provided value.
+
+ The return value indicates whether any jobs were processed.
+
+ :param burst: Whether to work on burst mode. Defaults to False.
+ :param max_jobs: Max number of jobs. Defaults to None.
+ :param max_idle_time: Max seconds for a worker to be idle. Defaults to None.
+ :param with_scheduler: Whether to run the scheduler in a separate process. Defaults to True.
+ :return: Whether any jobs were processed.
+ """
+ self.bootstrap()
+ if with_scheduler:
+ self._start_scheduler(burst)
+
+ self._install_signal_handlers()
+ try:
+ while True:
+ self._check_for_suspension(burst)
+
+ if self.should_run_maintenance_tasks:
+ self.run_maintenance_tasks()
+
+ if self._stop_requested:
+ logger.info(f"Worker {self.name}: stopping on request")
+ break
+
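+                # In burst mode do not block on dequeue; otherwise block for slightly less than the worker TTL between heartbeats.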
+ timeout = None if burst else (SCHEDULER_CONFIG.DEFAULT_WORKER_TTL - 15)
+ job, queue = self.dequeue_job_and_maintain_ttl(timeout, max_idle_time)
+ if job is None:
+ if burst:
+ logger.info(f"Worker {self.name}: done, quitting")
+ elif max_idle_time is not None:
+ logger.info(f"Worker {self.name}: idle for {max_idle_time} seconds, quitting")
+ break
+
+ self.execute_job(job, queue)
+ with self.connection.pipeline() as pipeline:
+ self._model.heartbeat(pipeline)
+ self._model.completed_jobs += 1
+ self._model.save(pipeline)
+ pipeline.execute()
+ if max_jobs is not None and self._model.completed_jobs >= max_jobs:
+ logger.info(f"Worker {self.name}: finished executing {self._model.completed_jobs} jobs, quitting")
+ break
+
+ except TimeoutErrorType:
+ logger.error(f"Worker {self.name}: Redis connection timeout, quitting...")
+
+ except StopRequested:
+            logger.info(f"Worker {self.name}: Worker was requested to stop, quitting")
+
+ except SystemExit:
+ # Cold shutdown detected
+ raise
+
+ except Exception as e:
+            logger.error(f"Worker {self.name}: found an unhandled exception ({e}), quitting...", exc_info=True)
+ finally:
+ self.teardown()
+ return bool(self._model.completed_jobs)
+
+ def handle_job_failure(self, job: JobModel, queue: Queue, exc_string=""):
+ """
+        Handles the failure of an executing job by:
+        1. Setting the job status to failed
+        2. Removing the job from StartedJobRegistry
+        3. Setting the worker's current job to None
+        4. Adding the job to FailedJobRegistry
+ """
+ logger.debug(f"Handling failed execution of job {job.name}")
+ job_is_stopped = self._model.get_field("stopped_job_name", self.connection) == job.name
+ with self.connection.pipeline() as pipeline:
+
+ # check whether a job was stopped intentionally and set the job
+ # status appropriately if it was this job.
+ retry = job.retries_left and job.retries_left > 0 and not job_is_stopped
+
+ if job_is_stopped:
+ job.set_status(JobStatus.STOPPED, connection=pipeline)
+ self._model.set_field("stopped_job_name", None, self.connection)
+ else:
+ # Requeue/reschedule if retry is configured, otherwise
+ if not retry:
+ job.set_status(JobStatus.FAILED, connection=pipeline)
+
+ queue.started_job_registry.delete(connection=pipeline, job_name=job.name)
+
+ if not self.disable_default_exception_handler and not retry:
+ queue.job_handle_failure(job, exc_string, connection=pipeline)
+ with suppress(ConnectionErrorTypes):
+ pipeline.execute()
+
+ self._model.current_job_name = None
+ self._model.current_job_working_time = 0
+ self._model.failed_job_count += 1
+ if job.started_at and job.ended_at:
+ self._model.total_working_time += (job.ended_at - job.started_at).total_seconds()
+ self._model.save(connection=pipeline)
+
+ if retry:
+ queue.retry_job(job, pipeline)
+
+ try:
+ pipeline.execute()
+ except Exception:
+ # Ensure that custom exception handlers are called even if the Broker is down
+ pass
+
+ def _start_scheduler(self, burst: bool = False):
+ """Starts the scheduler process.
+ This is specifically designed to be run by the worker when running the `work()` method.
+        Instantiates the WorkerScheduler and tries to acquire a lock.
+        If the lock is acquired, the scheduler is started.
+        If the worker is in burst mode, it just enqueues the scheduled jobs and quits,
+ otherwise, starts the scheduler in a separate process.
+
+ Args:
+ burst (bool, optional): Whether to work on burst mode. Defaults to False.
+ """
+ self.scheduler = WorkerScheduler(self.queues, connection=self.connection)
+ self.scheduler.start(burst=burst)
+ self._model.scheduler_pid = self.scheduler.pid
+
+ def bootstrap(self, with_command_listener: bool = True):
+ """Bootstraps the worker.
+ Runs the basic tasks that should run when the worker actually starts working.
+ Used so that new workers can focus on the work loop implementation rather
+ than the full bootstraping process.
+        than the full bootstrapping process.
+ self.register_birth()
+ logger.info(f"Worker {self.name} started with PID {os.getpid()}")
+ if with_command_listener:
+ self._command_listener.start()
+ qnames = [queue.name for queue in self.queues]
+ logger.info(f"""*** Listening to queues {', '.join(qnames)}...""")
+
+ def _check_for_suspension(self, burst: bool) -> None:
+        """Check whether the worker has been suspended (e.g. via the suspend worker command)"""
+ before_state = None
+ notified = False
+
+ while not self._stop_requested and self._model.get_field("is_suspended", self.connection):
+ if burst:
+ logger.info("Suspended in burst mode, exiting")
+ logger.info("Note: There could still be unfinished jobs on the queue")
+ raise StopRequested
+
+ if not notified:
+                logger.info("Worker suspended, waiting for a resume command")
+ before_state = self._model.state
+ self._model.set_field("state", WorkerStatus.SUSPENDED, connection=self.connection)
+ notified = True
+ time.sleep(1)
+
+ if before_state:
+ self._model.set_field("state", before_state, connection=self.connection)
+
+ def run_maintenance_tasks(self):
+ """
+        Runs periodic maintenance tasks. These include:
+ 1. Check if scheduler should be started. This check should not be run
+ on first run since worker.work() already calls
+ `scheduler.enqueue_scheduled_jobs()` on startup.
+ 2. Cleaning registries
+
+ No need to try to start scheduler on first run
+ """
+ if self._model.last_cleaned_at and self.scheduler and not self.scheduler.pid:
+ self.scheduler.start(burst=False)
+ self.clean_registries()
+
+ def dequeue_job_and_maintain_ttl(
+ self, timeout: Optional[int], max_idle_time: Optional[int] = None
+    ) -> Tuple[Optional[JobModel], Optional[Queue]]:
+ """Dequeues a job while maintaining the TTL.
+ :param timeout: The timeout for the dequeue operation.
+ :param max_idle_time: The maximum idle time for the worker.
+ :returns: A tuple with the job and the queue.
+ """
+ result = None
+ qnames = ",".join([queue.name for queue in self.queues])
+
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+ self.procline(f"Listening on {qnames}")
+ logger.debug(f"*** Listening on {qnames}...")
+ connection_wait_time = 1.0
+ idle_since = utcnow()
+ idle_time_left = max_idle_time
+ job, queue = None, None
+ while True:
+ try:
+ self._model.heartbeat(self.connection)
+
+ if self.should_run_maintenance_tasks:
+ self.run_maintenance_tasks()
+
+ if timeout is not None and idle_time_left is not None:
+ timeout = min(timeout, idle_time_left)
+
+ logger.debug(f"Dequeueing jobs on queues {qnames} and timeout {timeout}")
+ job, queue = Queue.dequeue_any(self._ordered_queues, timeout, connection=self.connection)
+ if job is not None:
+ self.reorder_queues(reference_queue=queue)
+ logger.debug(f"Dequeued job {job.name} from {queue.name}")
+ logger.info(f"{queue.name}: {job.name}")
+ break
+ except DequeueTimeout:
+ if max_idle_time is not None:
+ idle_for = (utcnow() - idle_since).total_seconds()
+ idle_time_left = math.ceil(max_idle_time - idle_for)
+ if idle_time_left <= 0:
+ break
+ except ConnectionErrorTypes as conn_err:
+ logger.error(f"Could not connect to Broker: {conn_err} Retrying in {connection_wait_time} seconds...")
+ time.sleep(connection_wait_time)
+ connection_wait_time *= self.exponential_backoff_factor
+ connection_wait_time = min(connection_wait_time, self.max_connection_wait_time)
+
+ self._model.heartbeat(self.connection)
+ return job, queue
+
+ @property
+ def connection_timeout(self) -> int:
+ return SCHEDULER_CONFIG.DEFAULT_WORKER_TTL - 5
+
+ def procline(self, message):
+ """Changes the current procname for the process.
+
+ This can be used to make `ps -ef` output more readable.
+ """
+ setprocname(f"{self._model._key}: {message}")
+
+ def register_birth(self):
+ """Registers its own birth."""
+ logger.debug(f"Registering birth of worker {self.name}")
+ worker_model = WorkerModel.get(self.name, connection=self.connection)
+ if worker_model is not None and worker_model.get_field("death", self.connection) is None:
+ raise ValueError(f"There exists an active worker named {self.name!r} already")
+ now = utcnow()
+ self._model = WorkerModel(
+ name=self.name,
+ queue_names=[queue.name for queue in self.queues],
+ birth=now,
+ last_heartbeat=now,
+ pid=self.pid,
+ hostname=self.hostname,
+ ip_address=self.ip_address,
+ version=self.version,
+ python_version=self.python_version,
+ state=WorkerStatus.STARTED,
+ )
+ self._model.save(self.connection)
+
+ def kill_job_execution_process(self, sig: signal.Signals = SIGKILL):
+        """Kill the job execution process, but catch the "No such process" error, as the job execution process could already
+ be dead.
+
+ :param sig: Optional, Defaults to SIGKILL.
+ """
+ try:
+ os.killpg(os.getpgid(self._model.job_execution_process_pid), sig)
+ logger.info(f"Killed job execution process pid {self._model.job_execution_process_pid}")
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ # "No such process" is fine with us
+ logger.debug("Job execution process already dead")
+ else:
+ raise
+
+ def wait_for_job_execution_process(self) -> Tuple[Optional[int], Optional[int], Optional["struct_rusage"]]:
+ """Waits for the job execution process to complete.
+ Uses `0` as argument as to include "any child in the process group of the current process".
+        Returns the (pid, exit status, resource usage) tuple reported by os.wait4, or Nones if there is no child process.
+ pid = stat = rusage = None
+ with contextlib.suppress(ChildProcessError): # ChildProcessError: [Errno 10] No child processes
+ pid, stat, rusage = os.wait4(self._model.job_execution_process_pid, 0)
+ return pid, stat, rusage
+
+ def request_force_stop(self, signum: int, frame: Optional[FrameType]):
+ """Terminates the application (cold shutdown).
+
+ :param signum: Signal number
+ :param frame: Frame
+ :raises SystemExit: SystemExit
+ """
+ # When a worker is run through a worker pool, it may receive duplicate signals.
+ # One is sent by the pool when it calls `pool.stop_worker()` and another is sent by the OS
+ # when a user hits Ctrl+C. In this case, if we receive the second signal within 1 second, we ignore it.
+ if (self._model.shutdown_requested_date is not None
+ and (utcnow() - self._model.shutdown_requested_date) < timedelta(seconds=1)):
+ logger.debug("Shutdown signal ignored, received twice in less than 1 second")
+ return
+
+ logger.warning("Cold shut down")
+
+ # Take down the job execution process with the worker
+ if self._model.job_execution_process_pid:
+ logger.debug(f"Taking down job execution process {self._model.job_execution_process_pid} with me")
+ self.kill_job_execution_process()
+ self.wait_for_job_execution_process()
+ raise SystemExit()
+
+ def request_stop(self, signum, frame):
+ """Stops the current worker loop but waits for child processes to
+ end gracefully (warm shutdown).
+
+ Args:
+ signum (Any): Signum
+ frame (Any): Frame
+ """
+ logger.debug(f"Got signal {signal_name(signum)}")
+ self._model.shutdown_requested_date = utcnow()
+
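+ # From this point on, another SIGINT/SIGTERM triggers request_force_stop (cold shutdown).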
+ signal.signal(signal.SIGINT, self.request_force_stop)
+ signal.signal(signal.SIGTERM, self.request_force_stop)
+
+ self.handle_warm_shutdown_request()
+ self._shutdown()
+
+ def _shutdown(self):
+ """
+ If shutdown is requested in the middle of a job, wait until the job finishes before shutting down, and record the request.
+ """
+ if self._model.state == WorkerStatus.BUSY:
+ self._stop_requested = True
+ self._model.shutdown_requested_date = utcnow()
+
+ logger.debug(
+ "Stopping after current job execution process is finished. Press Ctrl+C again for a cold shutdown.")
+ if self.scheduler:
+ self.stop_scheduler()
+ else:
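+ # Not busy: stop the scheduler (if running) and raise StopRequested to break out of the work loop.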
+ if self.scheduler:
+ self.stop_scheduler()
+ raise StopRequested()
+
+ def handle_warm_shutdown_request(self):
+ logger.info(f"Worker {self.name} [PID {self.pid}]: warm shut down requested")
+
+ def reorder_queues(self, reference_queue: Queue):
+ """Reorder the queues according to the strategy.
+ As this can be defined both in the `Worker` initialization or in the `work` method,
+ it doesn't take the strategy directly, but rather uses the private `_dequeue_strategy` attribute.
+
+ :param reference_queue: The queue to use as the reference point for reordering
+ """
+ if self._dequeue_strategy is None:
+ self._dequeue_strategy = DequeueStrategy.DEFAULT
+
+ if self._dequeue_strategy not in [e.value for e in DequeueStrategy]:
+ raise ValueError(
+ f"""Dequeue strategy should be one of {", ".join([e.value for e in DequeueStrategy])}"""
+ )
+ if self._dequeue_strategy == DequeueStrategy.DEFAULT:
+ return
+ if self._dequeue_strategy == DequeueStrategy.ROUND_ROBIN:
+ pos = self._ordered_queues.index(reference_queue)
+ self._ordered_queues = self._ordered_queues[pos + 1:] + self._ordered_queues[: pos + 1]
+ return
+ if self._dequeue_strategy == DequeueStrategy.RANDOM:
+ shuffle(self._ordered_queues)
+ return
+
+ def teardown(self):
+ if not self._is_job_execution_process:
+ if self.scheduler:
+ self.stop_scheduler()
+ self._model.delete(self.connection)
+ self._command_listener.stop()
+
+ def stop_scheduler(self):
+ """Stop the scheduler process.
+ Will send the kill signal to the scheduler process,
+ if there's an OSError, just passes and `join()`'s the scheduler process, waiting for the process to finish.
+ """
+ if self.scheduler._process and self.scheduler._process.pid:
+ try:
+ os.kill(self.scheduler._process.pid, signal.SIGTERM)
+ except OSError:
+ pass
+ self.scheduler._process.join()
+
+ def refresh(self):
+ """Refreshes the worker data.
+ It will get the data from the datastore and update the Worker's attributes.
+ """
+ self._model = WorkerModel.get(self.name, connection=self.connection)
+ if self._model is not None:
+ self.queues = [
+ Queue(name=queue_name, connection=self.connection)
+ for queue_name in self._model.queue_names
+ ]
+
+ def fork_job_execution_process(self, job: JobModel, queue: Queue) -> None:
+ """Spawns a job execution process to perform the actual work and passes it a job.
+ This is where the `fork()` actually happens.
+
+ :param job: The job to be executed
+ :param queue: The queue from which the job was dequeued
+ """
+ child_pid = os.fork()
+ os.environ["RQ_WORKER_ID"] = self.name
+ os.environ["RQ_JOB_ID"] = job.name
+ if child_pid == 0: # Child process/Job executor process to run the job
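+ # os.setsid() puts the child in its own session/process group so the worker can later signal the whole group (see kill_job_execution_process).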
+ os.setsid()
+ self.job_executor_process(job, queue)
+ os._exit(0) # just in case
+ else: # Parent worker process
+ self._model.set_field("job_execution_process_pid", child_pid, self.connection)
+ self.procline("Forked {0} at {1}".format(child_pid, time.time()))
+
+ def get_heartbeat_ttl(self, job: JobModel) -> int:
+ """Get's the TTL for the next heartbeat.
+ :param job: The Job
+ :return: The heartbeat TTL
+ """
+ if job.timeout and job.timeout > 0:
+ remaining_execution_time = int(job.timeout - self._model.current_job_working_time)
+ return min(remaining_execution_time, self.job_monitoring_interval) + 60
+ else:
+ return self.job_monitoring_interval + 60
+
+ def monitor_job_execution_process(self, job: JobModel, queue: Queue) -> None:
+ """The worker will monitor the job execution process and make sure that it either executes successfully or the
+ status of the job is set to failed.
+
+ :param job: The Job
+ :param queue: The Queue
+ """
+ retpid = ret_val = rusage = None
+ job.started_at = utcnow()
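+ # Wait for the child in job_monitoring_interval slices so the worker can keep sending heartbeats while the job runs.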
+ while True:
+ try:
+ with SCHEDULER_CONFIG.DEATH_PENALTY_CLASS(self.job_monitoring_interval,
+ JobExecutionMonitorTimeoutException):
+ retpid, ret_val, rusage = self.wait_for_job_execution_process()
+ break
+ except JobExecutionMonitorTimeoutException:
+ # job execution process has not exited yet and is still running. Send a heartbeat to keep the worker alive.
+ self._model.set_current_job_working_time((utcnow() - job.started_at).total_seconds(), self.connection)
+
+ # Kill the job from this side if something is really wrong (interpreter lock/etc).
+ if job.timeout != -1 and self._model.current_job_working_time > (job.timeout + 60):  # type: ignore
+ self._model.heartbeat(self.connection, self.job_monitoring_interval + 60)
+ self.kill_job_execution_process()
+ self.wait_for_job_execution_process()
+ break
+
+ self.maintain_heartbeats(job, queue)
+
+ except OSError as e:
+ # In case we encountered an OSError due to EINTR (which is
+ # caused by a SIGINT or SIGTERM signal during
+ # os.waitpid()), we simply ignore it and enter the next
+ # iteration of the loop, waiting for the child to end. In
+ # any other case, this is some other unexpected OS error,
+ # which we don't want to catch, so we re-raise those ones.
+ if e.errno != errno.EINTR:
+ raise
+ # Send a heartbeat to keep the worker alive.
+ self._model.heartbeat(self.connection)
+
+ self._model.current_job_working_time = 0
+ self._model.job_execution_process_pid = 0
+ self._model.save(connection=self.connection)
+ if ret_val == os.EX_OK: # The process exited normally.
+ return
+
+ job_status = job.get_status(self.connection)
+
+ if job_status is None: # Job completed and its ttl has expired
+ return
+ elif self._model.get_field("stopped_job_name", self.connection) == job.name:
+ # job execution process killed deliberately
+ logger.warning("Job stopped by user, moving job to FailedJobRegistry")
+ if job.stopped_callback:
+ job.stopped_callback()
+ self.handle_job_failure(job, queue=queue,
+ exc_string="Job stopped by user, job execution process terminated.")
+ elif job_status not in [JobStatus.FINISHED, JobStatus.FAILED]:
+ if not job.ended_at:
+ job.ended_at = utcnow()
+
+ # Unhandled failure: move the job to the failed queue
+ signal_msg = f" (signal {os.WTERMSIG(ret_val)})" if ret_val and os.WIFSIGNALED(ret_val) else ""
+ exc_string = f"job-execution-process terminated unexpectedly; waitpid returned {ret_val}{signal_msg}; "
+ logger.warning(f"Moving job to FailedJobRegistry ({exc_string})")
+
+ self.handle_job_failure(job, queue=queue, exc_string=exc_string)
+
+ def execute_job(self, job: JobModel, queue: Queue):
+ """Spawns a job execution process to perform the actual work and passes it a job.
+ The worker will wait for the job execution process and make sure it executes within the given timeout bounds, or
+ will end the job execution process with SIGALRM.
+ """
+ if self.fork_job_execution:
+ self._model.set_field("state", WorkerStatus.BUSY, connection=self.connection)
+ self.fork_job_execution_process(job, queue)
+ self.monitor_job_execution_process(job, queue)
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+ else:
+ self._model.set_field("state", WorkerStatus.BUSY, connection=self.connection)
+ self.perform_job(job, queue)
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+
+ def maintain_heartbeats(self, job: JobModel, queue: Queue):
+ """Updates worker and job's last heartbeat field."""
+ with self.connection.pipeline() as pipeline:
+ self._model.heartbeat(self.connection, self.job_monitoring_interval + 60)
+ ttl = self.get_heartbeat_ttl(job)
+
+ started_job_registry = queue.started_job_registry
+ started_job_registry.add(pipeline, current_timestamp() + ttl, self.name, update_existing_only=False)
+ results = pipeline.execute()
+ if results[2] == 1:
+ job.delete(self.connection)
+
+ def job_executor_process(self, job: JobModel, queue: Queue):
+ """This is the entry point of the newly spawned job execution process.
+ After fork()'ing, ensure we are generating random sequences that are different from the worker.
+
+ os._exit() is the way to exit from child processes after a fork(), in contrast to the regular sys.exit()
+ """
+ random.seed()
+ self.setup_job_execution_process_signals()
+ self._is_job_execution_process = True
+ try:
+ self.perform_job(job, queue)
+ except: # noqa
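+ # Exit with a non-zero status on any uncaught exception; the parent's monitor falls through to its failure handling when the exit status is not EX_OK.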
+ os._exit(1)
+ os._exit(0)
+
+ def setup_job_execution_process_signals(self):
+ """Setup signal handing for the newly spawned job execution process
+
+ Always ignore Ctrl+C in the job execution process, as it might abort the currently running job.
+
+ The main worker catches the Ctrl+C and requests graceful shutdown after the current work is done.
+ When cold shutdown is requested, it kills the current job anyway.
+ """
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ def prepare_job_execution(self, job: JobModel):
+ """Performs misc bookkeeping like updating states prior to job execution."""
+ logger.debug(f"Preparing for execution of Job ID {job.name}")
+ with self.connection.pipeline() as pipeline:
+ self._model.current_job_name = job.name
+ self._model.current_job_working_time = 0
+
+ heartbeat_ttl = self.get_heartbeat_ttl(job)
+ self._model.heartbeat(self.connection, heartbeat_ttl)
+ queue = get_queue(job.queue_name, connection=self.connection)
+ job.prepare_for_execution(self.name, queue.started_job_registry, connection=pipeline)
+ pipeline.execute()
+ logger.debug("Job preparation finished.")
+
+ self.procline(f"Processing {job.func_name} from {job.queue_name} since {time.time()}")
+
+ def handle_job_success(self, job: JobModel, return_value: Any, queue: Queue):
+ """Handles the successful execution of certain job.
+ It will remove the job from the `StartedJobRegistry`, adding it to the `SuccessfulJobRegistry`,
+ and run a few maintenance tasks including:
+ - Resting the current job ID
+ - Enqueue dependents
+ - Incrementing the job count and working time
+ - Handling of the job successful execution
+
+ Runs within a loop with the `watch` method so that protects interactions with dependents keys.
+
+ :param job: The job that was successful.
+ :param queue: The queue
+ """
+ logger.debug(f"Handling successful execution of job {job.name}")
+ if job.success_callback is not None:
+ success_callback_timeout = job.success_callback_timeout or SCHEDULER_CONFIG.CALLBACK_TIMEOUT
+ logger.debug(f"Running success callbacks for {job.name}")
+ # Enforce the callback timeout using the same death-penalty mechanism used below for job execution.
+ with SCHEDULER_CONFIG.DEATH_PENALTY_CLASS(success_callback_timeout, JobTimeoutException, job_id=job.name):
+ job.success_callback(job, self.connection, return_value)
+ with self.connection.pipeline() as pipeline:
+ while True:
+ try:
+ if not pipeline.explicit_transaction:
+ # enqueue_dependents didn't call multi after all!
+ # We have to do it ourselves to make sure everything runs in a transaction
+ pipeline.multi()
+
+ self._model.current_job_name = None
+ self._model.successful_job_count += 1
+ self._model.total_working_time += (job.ended_at - job.started_at).total_seconds()
+ self._model.save(connection=pipeline)
+ if job.result_ttl != 0:
+ logger.debug(f"Saving successful execution result for job {job.name}")
+ queue.job_handle_success(
+ job, result=return_value, result_ttl=job.result_ttl, connection=pipeline)
+
+ job.expire(job.result_ttl, connection=pipeline)
+ logger.debug(f"Removing job {job.name} from StartedJobRegistry")
+ queue.started_job_registry.delete(pipeline, job.name)
+
+ pipeline.execute()
+ logger.debug(f"Finished handling successful execution of job {job.name}")
+ break
+ except WatchErrorType:
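+ # A watched key changed before EXEC; retry the whole transaction.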
+ continue
+
+ def perform_job(self, job: JobModel, queue: Queue) -> bool:
+ """Performs the actual work of a job.
+ Called from the process executing the job (forked job execution process).
+
+ :param job: The job to perform
+ :param queue: The queue the job was dequeued from
+ :returns: True after finished.
+ """
+ logger.debug("Started Job Registry set.")
+
+ try:
+ self.prepare_job_execution(job)
+
+ job.started_at = utcnow()
+ timeout = job.timeout or SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT
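+ # The death-penalty context raises JobTimeoutException in this process if the job runs past its timeout.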
+ with SCHEDULER_CONFIG.DEATH_PENALTY_CLASS(timeout, JobTimeoutException, job_id=job.name):
+ logger.debug("Performing Job...")
+ rv = perform_job(job, self.connection)
+ logger.debug(f"Finished performing Job ID {job.name}")
+
+ job.ended_at = utcnow()
+ job.last_heartbeat = utcnow()
+ job.save(connection=self.connection)
+ self.handle_job_success(job=job, return_value=rv, queue=queue)
+ except: # NOQA
+ logger.debug(f"Job {job.name} raised an exception.")
+ job.ended_at = utcnow()
+ exc_info = sys.exc_info()
+ exc_string = "".join(traceback.format_exception(*exc_info))
+
+ try:
+ job.last_heartbeat = utcnow()
+ job.save(connection=self.connection)
+ job = JobModel.get(job.name, connection=self.connection)
+ if job is not None and job.failure_callback is not None:
+ logger.debug(f"Running failure callbacks for {job.name}")
+ try:
+ job.failure_callback(job, self.connection, *exc_info)
+ except Exception: # noqa
+ logger.exception(f"Job {self.name}: error while executing failure callback")
+ raise
+ except: # noqa
+ exc_info = sys.exc_info()
+ exc_string = "".join(traceback.format_exception(*exc_info))
+
+ self.handle_job_failure(job=job, exc_string=exc_string, queue=queue)
+ self.handle_exception(job, *exc_info)
+ return False
+
+ logger.info(f"{job.queue_name}: Job OK ({job.name})")
+ if rv is not None:
+ logger.debug(f"Result: {str(rv)}")
+
+ return True
+
+ def handle_exception(self, job: JobModel, *exc_info):
+ """Walks the exception handler stack to delegate exception handling.
+ If the job cannot be deserialized, it will raise when func_name or
+ the other properties are accessed, which will stop exceptions from
+ being properly logged, so we guard against it here.
+ """
+ logger.debug(f"Handling exception for {job.name}.")
+ exc_string = "".join(traceback.format_exception(*exc_info))
+
+ extra = {
+ "func": job.func_name,
+ "arguments": job.args,
+ "kwargs": job.kwargs,
+ "queue": job.queue_name,
+ "job_name": job.name,
+ }
+ func_name = job.func_name
+
+ logger.error(f"[Job {job.name}]: exception raised while executing ({func_name})\n{exc_string}", extra=extra)
+
+ for handler in self._exc_handlers:
+ logger.debug(f"Invoking exception handler {handler}")
+ fallthrough = handler(job, *exc_info)
+
+ # Only handlers with explicit return values should disable further
+ # exc handling, so interpret a None return value as True.
+ if fallthrough is None:
+ fallthrough = True
+
+ if not fallthrough:
+ break
+
+ def push_exc_handler(self, handler_func):
+ """Pushes an exception handler onto the exc handler stack."""
+ self._exc_handlers.append(handler_func)
+
+ def pop_exc_handler(self):
+ """Pops the latest exception handler off of the exc handler stack."""
+ return self._exc_handlers.pop()
+
+
+class SimpleWorker(Worker):
+ def execute_job(self, job: JobModel, queue: Queue):
+ """Execute job in same thread/process, do not fork()"""
+ self._model.set_field("state", WorkerStatus.BUSY, connection=self.connection)
+ self.perform_job(job, queue)
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+
+ def get_heartbeat_ttl(self, job: JobModel) -> int:
+ """The job timeout + 60 seconds
+
+ :param job: The Job
+ :returns: TTL or self.worker_ttl if the job has no timeout (-1)
+ """
+ if job.timeout == -1:
+ return self.worker_ttl
+ else:
+ return (job.timeout or self.worker_ttl) + 60
+
+
+class RoundRobinWorker(Worker):
+ """Modified version of Worker that dequeues jobs from the queues using a round-robin strategy."""
+
+ def reorder_queues(self, reference_queue):
+ pos = self._ordered_queues.index(reference_queue)
+ self._ordered_queues = self._ordered_queues[pos + 1:] + self._ordered_queues[: pos + 1]
+
+
+class RandomWorker(Worker):
+ """Modified version of Worker that dequeues jobs from the queues using a random strategy."""
+
+ def reorder_queues(self, reference_queue):
+ shuffle(self._ordered_queues)
+
+
+def _get_ip_address_from_connection(connection: ConnectionType, client_name: str) -> str:
+ try:
+ connection.client_setname(client_name)
+ except ResponseErrorTypes:
+ warnings.warn("CLIENT SETNAME command not supported, setting ip_address to unknown", Warning)
+ return "unknown"
+ client_adresses = [client["addr"] for client in connection.client_list() if client["name"] == client_name]
+ if len(client_adresses) > 0:
+ return client_adresses[0]
+ else:
+ warnings.warn("CLIENT LIST command not supported, setting ip_address to unknown", Warning)
+ return "unknown"
+
+
+def _ensure_list(obj: Any) -> List:
+ """When passed an iterable of objects, does nothing, otherwise, it returns a list with just that object in it.
+
+ :param obj: The object to ensure is a list
+ :return:
+ """
+ is_nonstring_iterable = isinstance(obj, Iterable) and not isinstance(obj, str)
+ return obj if is_nonstring_iterable else [obj]
diff --git a/testproject/testproject/settings.py b/testproject/testproject/settings.py
index e076068..43ae580 100644
--- a/testproject/testproject/settings.py
+++ b/testproject/testproject/settings.py
@@ -25,7 +25,7 @@
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
- "scheduler",
+ "scheduler.apps.SchedulerConfig",
]
MIDDLEWARE = [