{% endblock %}
diff --git a/scheduler/templates/admin/scheduler/jobs-list-with-tasks.partial.html b/scheduler/templates/admin/scheduler/jobs-list-with-tasks.partial.html
new file mode 100644
index 0000000..8b5499a
--- /dev/null
+++ b/scheduler/templates/admin/scheduler/jobs-list-with-tasks.partial.html
@@ -0,0 +1,76 @@
+{% load scheduler_tags i18n %}
+{% if not add %}
+
+{% endif %}
\ No newline at end of file
diff --git a/scheduler/templates/admin/scheduler/jobs-list.partial.html b/scheduler/templates/admin/scheduler/jobs-list.partial.html
index 8186242..b3d7bb4 100644
--- a/scheduler/templates/admin/scheduler/jobs-list.partial.html
+++ b/scheduler/templates/admin/scheduler/jobs-list.partial.html
@@ -20,7 +20,12 @@
- {{ exec.id }}
+ {{ exec.name }}
+ {% if exec.scheduled_task_id %}
+
+ Go to scheduled task
+
+ {% endif %}
|
{{ exec|job_status }}
diff --git a/scheduler/templates/admin/scheduler/jobs.html b/scheduler/templates/admin/scheduler/jobs.html
index e83d96a..72f8a79 100644
--- a/scheduler/templates/admin/scheduler/jobs.html
+++ b/scheduler/templates/admin/scheduler/jobs.html
@@ -92,11 +92,11 @@
|
+ value="{{ job.name }}"/>
|
-
- {{ job.id }}
+
+ {{ job.name }}
|
diff --git a/scheduler/templates/admin/scheduler/queue_workers.html b/scheduler/templates/admin/scheduler/queue_workers.html
index c1b28b5..122872c 100644
--- a/scheduler/templates/admin/scheduler/queue_workers.html
+++ b/scheduler/templates/admin/scheduler/queue_workers.html
@@ -18,7 +18,7 @@
{% endblock %}
-{% block content_title %}Workers in {{ queue.name }}{% endblock %}
+{% block content_title %}Queue {{ queue.name }} workers{% endblock %}
{% block content %}
diff --git a/scheduler/templates/admin/scheduler/single_job_action.html b/scheduler/templates/admin/scheduler/single_job_action.html
index 53f9089..b6adad5 100644
--- a/scheduler/templates/admin/scheduler/single_job_action.html
+++ b/scheduler/templates/admin/scheduler/single_job_action.html
@@ -6,7 +6,7 @@
Home ›
Queues ›
{{ queue.name }} ›
- {{ job.id }} ›
+ {{ job.name }} ›
Delete
{% endblock %}
@@ -18,8 +18,8 @@
Are you sure you want to {{ action }}
-
- {{ job.id }} ({{ job|show_func_name }})
+
+ {{ job.name }} ({{ job|show_func_name }})
from
{{ queue.name }}?
diff --git a/scheduler/templates/admin/scheduler/stats.html b/scheduler/templates/admin/scheduler/stats.html
index 369e3a5..bb7e41c 100644
--- a/scheduler/templates/admin/scheduler/stats.html
+++ b/scheduler/templates/admin/scheduler/stats.html
@@ -9,7 +9,7 @@
}
{% endblock %}
-{% block content_title %} RQ Queues{% endblock %}
+{% block content_title %} Task Queues{% endblock %}
{% block breadcrumbs %}
@@ -21,20 +21,18 @@
{% block content %}
-
Name |
+ Scheduled Jobs |
Queued Jobs |
- Oldest Queued Job |
Active Jobs |
- Deferred Jobs |
Finished Jobs |
Failed Jobs |
- Scheduled Jobs |
Canceled Jobs |
+ Oldest Queued Job |
Workers |
Host |
Port |
@@ -47,51 +45,47 @@
{% for queue in queues %}
-
+ |
{{ queue.name }}
-
+ |
+
+
+ {{ queue.scheduled_jobs }}
+
+ |
- {{ queue.jobs }}
+ {{ queue.queued_jobs }}
|
- {{ queue.oldest_job_timestamp }} |
-
-
+ |
+
{{ queue.started_jobs }}
-
- |
-
- {{ queue.deferred_jobs }}
-
- |
-
+
+ |
{{ queue.finished_jobs }}
-
- |
+
+ |
{{ queue.failed_jobs }}
-
- |
-
- {{ queue.scheduled_jobs }}
-
- |
-
+
+ |
{{ queue.canceled_jobs }}
-
- |
- {{ queue.workers }}
-
- |
+
+ {{ queue.oldest_job_timestamp }} |
+
+
+ {{ queue.workers }}
+
+ |
{{ queue.connection_kwargs.host }} |
{{ queue.connection_kwargs.port }} |
{{ queue.connection_kwargs.db }} |
diff --git a/scheduler/templates/admin/scheduler/worker_details.html b/scheduler/templates/admin/scheduler/worker_details.html
index 12ddfb6..989a1c6 100644
--- a/scheduler/templates/admin/scheduler/worker_details.html
+++ b/scheduler/templates/admin/scheduler/worker_details.html
@@ -12,80 +12,84 @@
{% block content %}
{% endfor %}
diff --git a/scheduler/templates/admin/scheduler/workers.html b/scheduler/templates/admin/scheduler/workers_list.html
similarity index 91%
rename from scheduler/templates/admin/scheduler/workers.html
rename to scheduler/templates/admin/scheduler/workers_list.html
index 09da96b..3eb3e15 100644
--- a/scheduler/templates/admin/scheduler/workers.html
+++ b/scheduler/templates/admin/scheduler/workers_list.html
@@ -17,7 +17,7 @@
{% endblock %}
-{% block content_title %}Django Workers{% endblock %}
+{% block content_title %}{{ workers|length }} Task Workers{% endblock %}
{% block content %}
diff --git a/scheduler/templatetags/scheduler_tags.py b/scheduler/templatetags/scheduler_tags.py
index faa0835..521c68e 100644
--- a/scheduler/templatetags/scheduler_tags.py
+++ b/scheduler/templatetags/scheduler_tags.py
@@ -3,18 +3,21 @@
from django import template
from django.utils.safestring import mark_safe
-from scheduler.rq_classes import JobExecution, DjangoQueue, DjangoWorker
-from scheduler.tools import get_scheduled_task
+from scheduler.helpers.queues import Queue
+from scheduler.helpers.tools import get_scheduled_task
+from scheduler.models.task import Task
+from scheduler.redis_models import Result, JobModel
+from scheduler.views.helpers import get_queue
register = template.Library()
@register.filter
-def show_func_name(rq_job: JobExecution) -> str:
+def show_func_name(job: JobModel) -> str:
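+    # Jobs created from scheduled Tasks wrap the real callable in run_task; unwrap it to show the target function.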
try:
- res = rq_job.func_name
- if res == "scheduler.tools.run_task":
- task = get_scheduled_task(*rq_job.args)
+ res = job.func_name
+ if res == "scheduler.helpers.tools.run_task":
+ task = get_scheduled_task(*job.args)
res = task.function_string()
return mark_safe(res)
except Exception as e:
@@ -27,31 +30,32 @@ def get_item(dictionary: Dict, key):
@register.filter
-def scheduled_job(job: JobExecution):
- django_scheduled_job = get_scheduled_task(*job.args)
- return django_scheduled_job.get_absolute_url()
+def scheduled_task(job: JobModel) -> str:
+ django_scheduled_task = get_scheduled_task(*job.args)
+ return django_scheduled_task.get_absolute_url()
@register.filter
-def worker_scheduler_pid(worker: Optional[DjangoWorker]) -> str:
- scheduler_pid = worker.scheduler_pid() if worker is not None else None
- return str(scheduler_pid) if scheduler_pid is not None else "-"
+def job_result(job: JobModel) -> Optional[str]:
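+    # Fetch the job's latest Result from the broker; returns its type name, or None if the job has not run yet.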
+ queue = get_queue(job.queue_name)
+ result = Result.fetch_latest(queue.connection, job.name)
+ return result.type.name.capitalize() if result is not None else None
@register.filter
-def job_result(job: JobExecution):
- result = job.latest_result()
- return result.type.name.capitalize() if result else None
+def job_scheduled_task(job: JobModel) -> Optional[str]:
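+    # Name of the scheduled Task that spawned this job, or None if that Task no longer exists.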
+ task = Task.objects.filter(id=job.scheduled_task_id).first()
+ return task.name if task is not None else None
@register.filter
-def job_status(job: JobExecution):
- result = job.get_status()
+def job_status(job: JobModel):
+ result = job.status
return result.capitalize()
@register.filter
-def job_runtime(job: JobExecution):
+def job_runtime(job: JobModel):
ended_at = job.ended_at
if ended_at:
runtime = job.ended_at - job.started_at
@@ -63,8 +67,8 @@ def job_runtime(job: JobExecution):
@register.filter
-def job_scheduled_time(job: JobExecution, queue: DjangoQueue):
+def job_scheduled_time(job: JobModel, queue: Queue):
try:
- return queue.scheduled_job_registry.get_scheduled_time(job.id)
+ return queue.scheduled_job_registry.get_scheduled_time(job.name)
except Exception:
return None
diff --git a/scheduler/tests/jobs.py b/scheduler/tests/jobs.py
index a6b0871..3b0891a 100644
--- a/scheduler/tests/jobs.py
+++ b/scheduler/tests/jobs.py
@@ -1,6 +1,7 @@
+import logging
from time import sleep
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
_counter = 0
@@ -17,9 +18,13 @@ def test_args_kwargs(*args, **kwargs):
kwargs_list = [f"{k}={v}" for (k, v) in kwargs.items()]
return func.format(", ".join(args_list + kwargs_list))
+def two_seconds_job():
+ sleep(2)
+ logging.info(f"Job {_counter}")
def long_job():
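+    # Sleeps practically forever; lets tests cancel or observe a job while it is still running.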
- sleep(10)
+ sleep(1000)
+ logging.info(f"Job {_counter}")
test_non_callable = "I am a teapot"
@@ -36,4 +41,4 @@ def test_job():
def enqueue_jobs():
queue = get_queue()
for i in range(20):
- queue.enqueue(test_job, job_id=f"job_{i}", args=())
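+        # Zero-pad the job names so their lexicographic order matches the enqueue order.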
+ queue.create_and_enqueue_job(test_job, name=f"job_{i:03}", args=())
diff --git a/scheduler/tests/test_internals.py b/scheduler/tests/test_internals.py
index f916a48..aeaf556 100644
--- a/scheduler/tests/test_internals.py
+++ b/scheduler/tests/test_internals.py
@@ -2,9 +2,10 @@
from django.utils import timezone
+from scheduler.helpers.callback import Callback, CallbackSetupError
from scheduler.models.task import TaskType
from scheduler.tests.testtools import SchedulerBaseCase, task_factory
-from scheduler.tools import get_scheduled_task
+from scheduler.helpers.tools import get_scheduled_task
class TestInternals(SchedulerBaseCase):
@@ -15,3 +16,20 @@ def test_get_scheduled_job(self):
get_scheduled_task(task.task_type, task.id + 1)
with self.assertRaises(ValueError):
get_scheduled_task("UNKNOWN_JOBTYPE", task.id)
+
+ def test_callback_bad_arguments(self):
+ with self.assertRaises(CallbackSetupError) as cm:
+ Callback("scheduler.tests.jobs.test_job", "1m")
+ self.assertEqual(str(cm.exception), "Callback `timeout` must be a positive int, but received 1m")
+ with self.assertRaises(CallbackSetupError) as cm:
+ Callback("scheduler.tests.jobs.non_existing_method")
+ self.assertEqual(str(cm.exception), "Invalid attribute name: non_existing_method")
+ with self.assertRaises(CallbackSetupError) as cm:
+ Callback("scheduler.tests.non_existing_module.non_existing_method")
+ self.assertEqual(str(cm.exception), "Invalid attribute name: non_existing_method")
+ with self.assertRaises(CallbackSetupError) as cm:
+ Callback("non_existing_method")
+ self.assertEqual(str(cm.exception), "Invalid attribute name: non_existing_method")
+ with self.assertRaises(CallbackSetupError) as cm:
+ Callback(1)
+ self.assertEqual(str(cm.exception), "Callback `func` must be a string or function, received 1")
diff --git a/scheduler/tests/test_job_decorator.py b/scheduler/tests/test_job_decorator.py
index 820c014..ec10d16 100644
--- a/scheduler/tests/test_job_decorator.py
+++ b/scheduler/tests/test_job_decorator.py
@@ -2,13 +2,14 @@
from django.test import TestCase
-from scheduler import job, settings
+from scheduler import settings
+from scheduler.helpers.queues import get_queue
from . import test_settings # noqa
-from ..decorators import JOB_METHODS_LIST
-from ..queues import get_queue, QueueNotFoundError
+from ..decorators import JOB_METHODS_LIST, job
+from ..redis_models.job import JobModel
-@job
+@job()
def test_job():
time.sleep(1)
return 1 + 1
@@ -36,45 +37,56 @@ def setUp(self) -> None:
get_queue("default").connection.flushall()
def test_all_job_methods_registered(self):
- self.assertEqual(1, len(JOB_METHODS_LIST))
+ self.assertEqual(4, len(JOB_METHODS_LIST))
def test_job_decorator_no_params(self):
test_job.delay()
- config = settings.SCHEDULER_CONFIG
- self._assert_job_with_func_and_props("default", test_job, config.DEFAULT_RESULT_TTL, config.DEFAULT_TIMEOUT)
+ self._assert_job_with_func_and_props(
+ "default",
+ test_job,
+ settings.SCHEDULER_CONFIG.DEFAULT_SUCCESS_TTL,
+ settings.SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT,
+ )
def test_job_decorator_timeout(self):
test_job_timeout.delay()
- config = settings.SCHEDULER_CONFIG
- self._assert_job_with_func_and_props("default", test_job_timeout, config.DEFAULT_RESULT_TTL, 1)
+ self._assert_job_with_func_and_props(
+ "default",
+ test_job_timeout,
+ settings.SCHEDULER_CONFIG.DEFAULT_SUCCESS_TTL,
+ 1,
+ )
def test_job_decorator_result_ttl(self):
test_job_result_ttl.delay()
- config = settings.SCHEDULER_CONFIG
- self._assert_job_with_func_and_props("default", test_job_result_ttl, 1, config.DEFAULT_TIMEOUT)
+ self._assert_job_with_func_and_props(
+ "default",
+ test_job_result_ttl,
+ 1,
+ settings.SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT,
+ )
def test_job_decorator_different_queue(self):
test_job_diff_queue.delay()
- config = settings.SCHEDULER_CONFIG
self._assert_job_with_func_and_props(
"django_tasks_scheduler_test",
test_job_diff_queue,
- config.DEFAULT_RESULT_TTL,
- config.DEFAULT_TIMEOUT,
+ settings.SCHEDULER_CONFIG.DEFAULT_SUCCESS_TTL,
+ settings.SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT,
)
def _assert_job_with_func_and_props(self, queue_name, expected_func, expected_result_ttl, expected_timeout):
queue = get_queue(queue_name)
- jobs = queue.get_jobs()
+ jobs = JobModel.get_many(queue.queued_job_registry.all(), queue.connection)
self.assertEqual(1, len(jobs))
j = jobs[0]
self.assertEqual(j.func, expected_func)
- self.assertEqual(j.result_ttl, expected_result_ttl)
+ self.assertEqual(j.success_ttl, expected_result_ttl)
self.assertEqual(j.timeout, expected_timeout)
def test_job_decorator_bad_queue(self):
- with self.assertRaises(QueueNotFoundError):
+ with self.assertRaises(settings.QueueNotFoundError):
@job("bad-queue")
def test_job_bad_queue():
diff --git a/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py b/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py
index cff48d8..68837ac 100644
--- a/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py
+++ b/scheduler/tests/test_mgmt_commands/test_delete_failed_executions.py
@@ -1,19 +1,19 @@
from django.core.management import call_command
-from scheduler.queues import get_queue
-from scheduler.tests.jobs import failing_job
-from scheduler.tests.test_views import BaseTestCase
-from scheduler.tools import create_worker
+from scheduler.helpers.queues import get_queue
from scheduler.tests import test_settings # noqa
+from scheduler.tests.jobs import failing_job
+from scheduler.helpers.tools import create_worker
+from scheduler.tests.test_views.base import BaseTestCase
class DeleteFailedExecutionsTest(BaseTestCase):
def test_delete_failed_executions__delete_jobs(self):
queue = get_queue("default")
call_command("delete_failed_executions", queue="default")
- queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
+ queue.create_and_enqueue_job(failing_job)
+        worker = create_worker("default", burst=True)
+ worker.work()
self.assertEqual(1, len(queue.failed_job_registry))
call_command("delete_failed_executions", queue="default")
self.assertEqual(0, len(queue.failed_job_registry))
diff --git a/scheduler/tests/test_mgmt_commands/test_export.py b/scheduler/tests/test_mgmt_commands/test_export.py
index 6ad1878..6e274f6 100644
--- a/scheduler/tests/test_mgmt_commands/test_export.py
+++ b/scheduler/tests/test_mgmt_commands/test_export.py
@@ -9,7 +9,7 @@
from scheduler.tests import test_settings # noqa
from scheduler.tests.testtools import task_factory
-from scheduler.tools import TaskType
+from scheduler.helpers.tools import TaskType
class ExportTest(TestCase):
@@ -22,34 +22,34 @@ def tearDown(self) -> None:
os.remove(self.tmpfile.name)
def test_export__should_export_job(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=True))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=True))
# act
call_command("export", filename=self.tmpfile.name)
# assert
result = json.load(self.tmpfile)
- self.assertEqual(len(jobs), len(result))
- self.assertEqual(result[0], jobs[0].to_dict())
- self.assertEqual(result[1], jobs[1].to_dict())
+ self.assertEqual(len(tasks), len(result))
+ self.assertEqual(result[0], tasks[0].to_dict())
+ self.assertEqual(result[1], tasks[1].to_dict())
def test_export__should_export_enabled_jobs_only(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=False))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=False))
# act
call_command("export", filename=self.tmpfile.name, enabled=True)
# assert
result = json.load(self.tmpfile)
- self.assertEqual(len(jobs) - 1, len(result))
- self.assertEqual(result[0], jobs[0].to_dict())
+ self.assertEqual(len(tasks) - 1, len(result))
+ self.assertEqual(result[0], tasks[0].to_dict())
def test_export__should_export_job_yaml_without_yaml_lib(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=True))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=True))
# act
with mock.patch.dict("sys.modules", {"yaml": None}):
@@ -58,16 +58,16 @@ def test_export__should_export_job_yaml_without_yaml_lib(self):
self.assertEqual(cm.exception.code, 1)
def test_export__should_export_job_yaml_green(self):
- jobs = list()
- jobs.append(task_factory(TaskType.ONCE, enabled=True))
- jobs.append(task_factory(TaskType.REPEATABLE, enabled=True))
- jobs.append(task_factory(TaskType.CRON, enabled=True))
+ tasks = list()
+ tasks.append(task_factory(TaskType.ONCE, enabled=True))
+ tasks.append(task_factory(TaskType.REPEATABLE, enabled=True))
+ tasks.append(task_factory(TaskType.CRON, enabled=True))
# act
call_command("export", filename=self.tmpfile.name, format="yaml")
# assert
result = yaml.load(self.tmpfile, yaml.SafeLoader)
- self.assertEqual(len(jobs), len(result))
- self.assertEqual(result[0], jobs[0].to_dict())
- self.assertEqual(result[1], jobs[1].to_dict())
- self.assertEqual(result[2], jobs[2].to_dict())
+ self.assertEqual(len(tasks), len(result))
+ self.assertEqual(result[0], tasks[0].to_dict())
+ self.assertEqual(result[1], tasks[1].to_dict())
+ self.assertEqual(result[2], tasks[2].to_dict())
diff --git a/scheduler/tests/test_mgmt_commands/test_import.py b/scheduler/tests/test_mgmt_commands/test_import.py
index db95e66..318c068 100644
--- a/scheduler/tests/test_mgmt_commands/test_import.py
+++ b/scheduler/tests/test_mgmt_commands/test_import.py
@@ -8,9 +8,9 @@
from django.test import TestCase
from scheduler.models.task import Task
-from scheduler.tests.testtools import task_factory
-from scheduler.tools import TaskType
from scheduler.tests import test_settings # noqa
+from scheduler.tests.testtools import task_factory
+from scheduler.helpers.tools import TaskType
class ImportTest(TestCase):
diff --git a/scheduler/tests/test_mgmt_commands/test_rq_stats.py b/scheduler/tests/test_mgmt_commands/test_rq_stats.py
deleted file mode 100644
index dc43a49..0000000
--- a/scheduler/tests/test_mgmt_commands/test_rq_stats.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from django.core.management import call_command
-from django.test import TestCase
-
-from scheduler.tests import test_settings # noqa
-
-
-class RqstatsTest(TestCase):
- def test_rqstats__does_not_fail(self):
- call_command("rqstats", "-j")
- call_command("rqstats", "-y")
- call_command("rqstats")
diff --git a/scheduler/tests/test_mgmt_commands/test_rq_worker.py b/scheduler/tests/test_mgmt_commands/test_rq_worker.py
deleted file mode 100644
index b2c4967..0000000
--- a/scheduler/tests/test_mgmt_commands/test_rq_worker.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from django.core.management import call_command
-from django.test import TestCase
-
-from scheduler.queues import get_queue
-from scheduler.tests.jobs import failing_job
-from scheduler.tests import test_settings # noqa
-
-
-class RqworkerTestCase(TestCase):
- def test_rqworker__no_queues_params(self):
- queue = get_queue("default")
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # Create a worker to execute these jobs
- call_command("rqworker", fork_job_execution=False, burst=True)
-
- # check if all jobs are really failed
- for job in jobs:
- self.assertTrue(job.is_failed)
-
- def test_rqworker__job_class_param__green(self):
- queue = get_queue("default")
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # Create a worker to execute these jobs
- call_command(
- "rqworker", "--job-class", "scheduler.rq_classes.JobExecution", fork_job_execution=False, burst=True
- )
-
- # check if all jobs are really failed
- for job in jobs:
- self.assertTrue(job.is_failed)
-
- def test_rqworker__bad_job_class__fail(self):
- queue = get_queue("default")
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # Create a worker to execute these jobs
- with self.assertRaises(ImportError):
- call_command("rqworker", "--job-class", "rq.badclass", fork_job_execution=False, burst=True)
-
- def test_rqworker__run_jobs(self):
- queue = get_queue("default")
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # Create a worker to execute these jobs
- call_command("rqworker", "default", fork_job_execution=False, burst=True)
-
- # check if all jobs are really failed
- for job in jobs:
- self.assertTrue(job.is_failed)
-
- def test_rqworker__worker_with_two_queues(self):
- queue = get_queue("default")
- queue2 = get_queue("django_tasks_scheduler_test")
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
- job = queue2.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # Create a worker to execute these jobs
- call_command("rqworker", "default", "django_tasks_scheduler_test", fork_job_execution=False, burst=True)
-
- # check if all jobs are really failed
- for job in jobs:
- self.assertTrue(job.is_failed)
-
- def test_rqworker__worker_with_one_queue__does_not_perform_other_queue_job(self):
- queue = get_queue("default")
- queue2 = get_queue("django_tasks_scheduler_test")
-
- job = queue.enqueue(failing_job)
- other_job = queue2.enqueue(failing_job)
-
- # Create a worker to execute these jobs
- call_command("rqworker", "default", fork_job_execution=False, burst=True)
- # assert
- self.assertTrue(job.is_failed)
- self.assertTrue(other_job.is_queued)
diff --git a/scheduler/tests/test_mgmt_commands/test_run_job.py b/scheduler/tests/test_mgmt_commands/test_run_job.py
index 7429c63..a589589 100644
--- a/scheduler/tests/test_mgmt_commands/test_run_job.py
+++ b/scheduler/tests/test_mgmt_commands/test_run_job.py
@@ -1,9 +1,10 @@
from django.core.management import call_command
from django.test import TestCase
-from scheduler.queues import get_queue
-from scheduler.tests.jobs import test_job
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
from scheduler.tests import test_settings # noqa
+from scheduler.tests.jobs import test_job
class RunJobTest(TestCase):
@@ -14,6 +15,6 @@ def test_run_job__should_schedule_job(self):
# act
call_command("run_job", func_name, queue="default")
# assert
- job_list = queue.get_jobs()
+ job_list = JobModel.get_many(queue.queued_job_registry.all(), queue.connection)
self.assertEqual(1, len(job_list))
self.assertEqual(func_name + "()", job_list[0].get_call_string())
diff --git a/scheduler/tests/test_mgmt_commands/test_scheduler_stats.py b/scheduler/tests/test_mgmt_commands/test_scheduler_stats.py
new file mode 100644
index 0000000..b58e835
--- /dev/null
+++ b/scheduler/tests/test_mgmt_commands/test_scheduler_stats.py
@@ -0,0 +1,11 @@
+from django.core.management import call_command
+from django.test import TestCase
+
+from scheduler.tests import test_settings # noqa
+
+
+class SchedulerStatsTest(TestCase):
+ def test_scheduler_stats__does_not_fail(self):
+ call_command("scheduler_stats", "-j")
+ call_command("scheduler_stats", "-y")
+ call_command("scheduler_stats")
diff --git a/scheduler/tests/test_mgmt_commands/test_scheduler_worker.py b/scheduler/tests/test_mgmt_commands/test_scheduler_worker.py
new file mode 100644
index 0000000..02b247d
--- /dev/null
+++ b/scheduler/tests/test_mgmt_commands/test_scheduler_worker.py
@@ -0,0 +1,80 @@
+from django.core.management import call_command
+from django.test import TestCase
+
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobModel
+from scheduler.tests import test_settings # noqa
+from scheduler.tests.jobs import failing_job
+
+
+class SchedulerWorkerTestCase(TestCase):
+ def test_scheduler_worker__no_queues_params(self):
+ queue = get_queue("default")
+
+ # enqueue some jobs that will fail
+ job_names = []
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(failing_job)
+ job_names.append(job.name)
+
+ # Create a worker to execute these jobs
+ call_command("scheduler_worker", fork_job_execution=False, burst=True)
+
+ # check if all jobs are really failed
+ for job_name in job_names:
+ job = JobModel.get(name=job_name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+
+ def test_scheduler_worker__run_jobs(self):
+ queue = get_queue("default")
+
+ # enqueue some jobs that will fail
+ job_names = []
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(failing_job)
+ job_names.append(job.name)
+
+ # Create a worker to execute these jobs
+ call_command("scheduler_worker", "default", fork_job_execution=False, burst=True)
+
+ # check if all jobs are really failed
+ for job_name in job_names:
+ job = JobModel.get(name=job_name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+
+ def test_scheduler_worker__worker_with_two_queues(self):
+ queue = get_queue("default")
+ queue2 = get_queue("django_tasks_scheduler_test")
+
+ # enqueue some jobs that will fail
+ job_names = []
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(failing_job)
+ job_names.append(job.name)
+ job = queue2.create_and_enqueue_job(failing_job)
+ job_names.append(job.name)
+
+ # Create a worker to execute these jobs
+        call_command(
+            "scheduler_worker", "default", "django_tasks_scheduler_test", fork_job_execution=False, burst=True
+        )
+
+ # check if all jobs are really failed
+ for job_name in job_names:
+ job = JobModel.get(name=job_name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+
+ def test_scheduler_worker__worker_with_one_queue__does_not_perform_other_queue_job(self):
+ queue = get_queue("default")
+ queue2 = get_queue("django_tasks_scheduler_test")
+
+ job = queue.create_and_enqueue_job(failing_job)
+ other_job = queue2.create_and_enqueue_job(failing_job)
+
+ # Create a worker to execute these jobs
+ call_command("scheduler_worker", "default", fork_job_execution=False, burst=True)
+
+ # assert
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+ other_job = JobModel.get(other_job.name, connection=queue.connection)
+
+ self.assertTrue(other_job.is_queued, f"Expected other job to be queued but status={other_job.status}")
diff --git a/scheduler/tests/test_multiprocess/__init__.py b/scheduler/tests/test_multiprocess/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/tests/test_multiprocess/test_integrity.py b/scheduler/tests/test_multiprocess/test_integrity.py
new file mode 100644
index 0000000..bc3f57c
--- /dev/null
+++ b/scheduler/tests/test_multiprocess/test_integrity.py
@@ -0,0 +1,40 @@
+from time import sleep
+
+from django.test import tag
+from django.urls import reverse
+
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobStatus, JobModel, WorkerModel
+from scheduler.tests.jobs import long_job
+from .. import testtools
+from ..test_views.base import BaseTestCase
+
+
+@tag("multiprocess")
+class MultiProcessTest(BaseTestCase):
+ def test_cancel_job_after_it_started(self):
+ # arrange
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.create_and_enqueue_job(long_job)
+ self.assertTrue(job.is_queued)
+ process, worker_name = testtools.run_worker_in_process("django_tasks_scheduler_test")
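+        # Give the spawned worker a moment to dequeue and start the job before asserting on its status.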
+ sleep(0.2)
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertEqual(JobStatus.STARTED, job.status)
+ # act
+ res = self.client.post(reverse("queue_job_action", args=[job.name, "cancel"]), {"post": "yes"}, follow=True)
+
+ # assert
+ self.assertEqual(200, res.status_code)
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertEqual(JobStatus.STOPPED, job.status)
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+ sleep(0.2)
+ process.terminate()
+ process.join(2)
+ process.kill()
+ worker_model = WorkerModel.get(worker_name, connection=queue.connection)
+ self.assertEqual(0, worker_model.completed_jobs)
+ self.assertEqual(0, worker_model.failed_job_count)
+ self.assertEqual(0, worker_model.successful_job_count)
+ self.assertIsNotNone(worker_model.shutdown_requested_date)
diff --git a/scheduler/tests/test_settings.py b/scheduler/tests/test_settings.py
index 821bd14..93d7f3b 100644
--- a/scheduler/tests/test_settings.py
+++ b/scheduler/tests/test_settings.py
@@ -5,7 +5,7 @@
from scheduler.settings import conf_settings
settings.SCHEDULER_QUEUES = {
- "default": {"HOST": "localhost", "PORT": 6379, "DB": 0, "DEFAULT_TIMEOUT": 500},
+ "default": {"HOST": "localhost", "PORT": 6379, "DB": 0},
"test": {"HOST": "localhost", "PORT": 1, "DB": 1},
"sentinel": {
"SENTINELS": [("localhost", 26736), ("localhost", 26737)],
@@ -13,14 +13,12 @@
"DB": 1,
"USERNAME": "redis-user",
"PASSWORD": "secret",
- "SOCKET_TIMEOUT": 10,
"SENTINEL_KWARGS": {},
},
"test1": {
"HOST": "localhost",
"PORT": 1,
"DB": 1,
- "DEFAULT_TIMEOUT": 400,
},
"test2": {
"HOST": "localhost",
@@ -86,11 +84,10 @@
"HOST": "localhost",
"PORT": 6379,
"DB": 0,
- "DEFAULT_TIMEOUT": 400,
},
}
-if os.getenv("FAKEREDIS", "False") == "True":
- for name, queue_settings in settings.SCHEDULER_QUEUES:
- queue_settings["BROKER"] = "fakeredis"
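+# Route every queue through the fakeredis broker when FAKEREDIS=True, so tests can run without a Redis server.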
+if os.getenv("FAKEREDIS", "False") == "True":  # pragma: no cover
+    for name, queue_settings in settings.SCHEDULER_QUEUES.items():  # pragma: no cover
+        queue_settings["BROKER"] = "fakeredis"  # pragma: no cover
conf_settings()
diff --git a/scheduler/tests/test_task_types/test_cron_task.py b/scheduler/tests/test_task_types/test_cron_task.py
index a7d2a7a..afa2725 100644
--- a/scheduler/tests/test_task_types/test_cron_task.py
+++ b/scheduler/tests/test_task_types/test_cron_task.py
@@ -1,26 +1,32 @@
from django.core.exceptions import ValidationError
from scheduler import settings
-from scheduler.queues import get_queue
+from scheduler.helpers.queues import get_queue
+from scheduler.models.task import TaskType
+from scheduler.redis_models import JobModel
from scheduler.tests.test_task_types.test_task_model import BaseTestCases
from scheduler.tests.testtools import task_factory
-from scheduler.tools import create_worker, TaskType
+from scheduler.helpers.tools import create_worker
class TestCronTask(BaseTestCases.TestBaseTask):
task_type = TaskType.CRON
+ def setUp(self) -> None:
+ super().setUp()
+ self.queue_name = settings.get_queue_names()[0]
+
def test_clean(self):
task = task_factory(self.task_type)
task.cron_string = "* * * * *"
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_job"
self.assertIsNone(task.clean())
def test_clean_cron_string_invalid(self):
task = task_factory(self.task_type)
task.cron_string = "not-a-cron-string"
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_job"
with self.assertRaises(ValidationError):
task.clean_cron_string()
@@ -28,8 +34,9 @@ def test_clean_cron_string_invalid(self):
def test_check_rescheduled_after_execution(self):
task = task_factory(self.task_type)
queue = task.rqueue
- first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ first_run_id = task.job_name
+ entry = JobModel.get(first_run_id, connection=queue.connection)
+ self.assertIsNotNone(entry)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 0)
@@ -37,16 +44,13 @@ def test_check_rescheduled_after_execution(self):
self.assertEqual(task.successful_runs, 1)
self.assertIsNotNone(task.last_successful_run)
self.assertTrue(task.is_scheduled())
- self.assertNotEqual(task.job_id, first_run_id)
+ self.assertNotEqual(task.job_name, first_run_id)
def test_check_rescheduled_after_failed_execution(self):
- task = task_factory(
- self.task_type,
- callable_name="scheduler.tests.jobs.scheduler.tests.jobs.test_job",
- )
+ task = task_factory(self.task_type, callable_name="scheduler.tests.jobs.failing_job")
queue = task.rqueue
- first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ first_run_id = task.job_name
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 1)
@@ -54,25 +58,23 @@ def test_check_rescheduled_after_failed_execution(self):
self.assertEqual(task.successful_runs, 0)
self.assertIsNone(task.last_successful_run)
self.assertTrue(task.is_scheduled())
- self.assertNotEqual(task.job_id, first_run_id)
+ self.assertNotEqual(task.job_name, first_run_id)
def test_cron_task_enqueuing_jobs(self):
queue = get_queue()
- prev_queued = len(queue.scheduled_job_registry)
- prev_finished = len(queue.finished_job_registry)
+ prev_queued = queue.scheduled_job_registry.count(connection=queue.connection)
+ prev_finished = queue.finished_job_registry.count(connection=queue.connection)
+
task = task_factory(self.task_type, callable_name="scheduler.tests.jobs.enqueue_jobs")
- self.assertEqual(prev_queued + 1, len(queue.scheduled_job_registry))
- first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ self.assertEqual(prev_queued + 1, queue.scheduled_job_registry.count(connection=queue.connection))
+ first_run_id = task.job_name
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
- self.assertEqual(20, len(queue))
- self.assertEqual(prev_finished + 1, len(queue.finished_job_registry))
- worker = create_worker(
- "default",
- fork_job_execution=False,
- )
- worker.work(burst=True)
- self.assertEqual(prev_finished + 21, len(queue.finished_job_registry))
- worker.refresh()
- self.assertEqual(20, worker.successful_job_count)
- self.assertEqual(0, worker.failed_job_count)
+ self.assertEqual(20, len(queue.queued_job_registry))
+ self.assertEqual(prev_finished + 1, queue.finished_job_registry.count(connection=queue.connection))
+ worker = create_worker("default", fork_job_execution=False, burst=True)
+ worker.work()
+ self.assertEqual(prev_finished + 21, queue.finished_job_registry.count(connection=queue.connection))
+ worker.refresh(update_queues=True)
+ self.assertEqual(20, worker._model.successful_job_count)
+ self.assertEqual(0, worker._model.failed_job_count)
diff --git a/scheduler/tests/test_task_types/test_once_task.py b/scheduler/tests/test_task_types/test_once_task.py
index f9b686c..b57fd77 100644
--- a/scheduler/tests/test_task_types/test_once_task.py
+++ b/scheduler/tests/test_task_types/test_once_task.py
@@ -10,10 +10,11 @@
class TestScheduledTask(BaseTestCases.TestSchedulableTask):
task_type = TaskType.ONCE
+ queue_name = settings.get_queue_names()[0]
def test_clean(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
self.assertIsNone(job.clean())
diff --git a/scheduler/tests/test_task_types/test_repeatable_task.py b/scheduler/tests/test_task_types/test_repeatable_task.py
index fce0d32..bd096d9 100644
--- a/scheduler/tests/test_task_types/test_repeatable_task.py
+++ b/scheduler/tests/test_task_types/test_repeatable_task.py
@@ -5,13 +5,15 @@
from django.utils import timezone
from scheduler import settings
+from scheduler.redis_models import JobModel
from scheduler.tests.test_task_types.test_task_model import BaseTestCases
-from scheduler.tests.testtools import task_factory, _get_task_job_execution_from_registry
-from scheduler.tools import TaskType
+from scheduler.tests.testtools import task_factory, _get_task_scheduled_job_from_registry
+from scheduler.helpers.tools import TaskType
class TestRepeatableTask(BaseTestCases.TestSchedulableTask):
task_type = TaskType.REPEATABLE
+ queue_name = settings.get_queue_names()[0]
def test_unschedulable_old_job(self):
job = task_factory(self.task_type, scheduled_time=timezone.now() - timedelta(hours=1), repeat=0)
@@ -24,39 +26,35 @@ def test_schedulable_old_job_repeat_none(self):
def test_clean(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 1
- job.result_ttl = -1
+ job.success_ttl = -1
self.assertIsNone(job.clean())
def test_clean_seconds(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 60
- job.result_ttl = -1
+ job.success_ttl = -1
job.interval_unit = "seconds"
self.assertIsNone(job.clean())
- @override_settings(
- SCHEDULER_CONFIG={
- "SCHEDULER_INTERVAL": 10,
- }
- )
+ @override_settings(SCHEDULER_CONFIG={"SCHEDULER_INTERVAL": 10})
def test_clean_too_frequent(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 2 # Smaller than 10
- job.result_ttl = -1
+ job.success_ttl = -1
job.interval_unit = "seconds"
with self.assertRaises(ValidationError):
job.clean_interval_unit()
def test_clean_not_multiple(self):
job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
+ job.queue = self.queue_name
job.callable = "scheduler.tests.jobs.test_job"
job.interval = 121
job.interval_unit = "seconds"
@@ -64,41 +62,41 @@ def test_clean_not_multiple(self):
job.clean_interval_unit()
def test_clean_short_result_ttl(self):
- job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
- job.callable = "scheduler.tests.jobs.test_job"
- job.interval = 1
- job.repeat = 1
- job.result_ttl = 3599
- job.interval_unit = "hours"
- job.repeat = 42
+ task = task_factory(self.task_type)
+ task.queue = self.queue_name
+ task.callable = "scheduler.tests.jobs.test_job"
+ task.interval = 1
+ task.repeat = 1
+ task.result_ttl = 3599
+ task.interval_unit = "hours"
+ task.repeat = 42
with self.assertRaises(ValidationError):
- job.clean_result_ttl()
+ task.clean_result_ttl()
def test_clean_indefinite_result_ttl(self):
- job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
- job.callable = "scheduler.tests.jobs.test_job"
- job.interval = 1
- job.result_ttl = -1
- job.interval_unit = "hours"
- job.clean_result_ttl()
+ task = task_factory(self.task_type)
+ task.queue = self.queue_name
+ task.callable = "scheduler.tests.jobs.test_job"
+ task.interval = 1
+ task.result_ttl = -1
+ task.interval_unit = "hours"
+ task.clean_result_ttl()
def test_clean_undefined_result_ttl(self):
- job = task_factory(self.task_type)
- job.queue = list(settings.QUEUES)[0]
- job.callable = "scheduler.tests.jobs.test_job"
- job.interval = 1
- job.interval_unit = "hours"
- job.clean_result_ttl()
+ task = task_factory(self.task_type)
+ task.queue = self.queue_name
+ task.callable = "scheduler.tests.jobs.test_job"
+ task.interval = 1
+ task.interval_unit = "hours"
+ task.clean_result_ttl()
def test_interval_seconds_weeks(self):
- job = task_factory(self.task_type, interval=2, interval_unit="weeks")
- self.assertEqual(1209600.0, job.interval_seconds())
+ task = task_factory(self.task_type, interval=2, interval_unit="weeks")
+ self.assertEqual(1209600.0, task.interval_seconds())
def test_interval_seconds_days(self):
- job = task_factory(self.task_type, interval=2, interval_unit="days")
- self.assertEqual(172800.0, job.interval_seconds())
+ task = task_factory(self.task_type, interval=2, interval_unit="days")
+ self.assertEqual(172800.0, task.interval_seconds())
def test_interval_seconds_hours(self):
job = task_factory(self.task_type, interval=2, interval_unit="hours")
@@ -113,15 +111,13 @@ def test_interval_seconds_seconds(self):
self.assertEqual(15.0, job.interval_seconds())
def test_result_interval(self):
- job = task_factory(
- self.task_type,
- )
- entry = _get_task_job_execution_from_registry(job)
+ job = task_factory(self.task_type)
+ entry = _get_task_scheduled_job_from_registry(job)
self.assertEqual(entry.meta["interval"], 3600)
def test_repeat(self):
job = task_factory(self.task_type, repeat=10)
- entry = _get_task_job_execution_from_registry(job)
+ entry = _get_task_scheduled_job_from_registry(job)
self.assertEqual(entry.meta["repeat"], 10)
def test_repeat_old_job_exhausted(self):
@@ -154,8 +150,8 @@ def test_repeat_none_interval_2_min(self):
def test_check_rescheduled_after_execution(self):
task = task_factory(self.task_type, scheduled_time=timezone.now() + timedelta(seconds=1), repeat=10)
queue = task.rqueue
- first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ first_run_id = task.job_name
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 0)
@@ -163,7 +159,7 @@ def test_check_rescheduled_after_execution(self):
self.assertEqual(task.successful_runs, 1)
self.assertIsNotNone(task.last_successful_run)
self.assertTrue(task.is_scheduled())
- self.assertNotEqual(task.job_id, first_run_id)
+ self.assertNotEqual(task.job_name, first_run_id)
def test_check_rescheduled_after_execution_failed_job(self):
task = task_factory(
@@ -173,8 +169,8 @@ def test_check_rescheduled_after_execution_failed_job(self):
repeat=10,
)
queue = task.rqueue
- first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ first_run_id = task.job_name
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 1)
@@ -182,7 +178,7 @@ def test_check_rescheduled_after_execution_failed_job(self):
self.assertEqual(task.successful_runs, 0)
self.assertIsNone(task.last_successful_run)
self.assertTrue(task.is_scheduled())
- self.assertNotEqual(task.job_id, first_run_id)
+ self.assertNotEqual(task.job_name, first_run_id)
def test_check_not_rescheduled_after_last_repeat(self):
task = task_factory(
@@ -191,12 +187,12 @@ def test_check_not_rescheduled_after_last_repeat(self):
repeat=1,
)
queue = task.rqueue
- first_run_id = task.job_id
- entry = queue.fetch_job(first_run_id)
+ first_run_id = task.job_name
+ entry = JobModel.get(first_run_id, connection=queue.connection)
queue.run_sync(entry)
task.refresh_from_db()
self.assertEqual(task.failed_runs, 0)
self.assertIsNone(task.last_failed_run)
self.assertEqual(task.successful_runs, 1)
self.assertIsNotNone(task.last_successful_run)
- self.assertNotEqual(task.job_id, first_run_id)
+ self.assertNotEqual(task.job_name, first_run_id)
diff --git a/scheduler/tests/test_task_types/test_task_model.py b/scheduler/tests/test_task_types/test_task_model.py
index a6f6c3b..eecf7d6 100644
--- a/scheduler/tests/test_task_types/test_task_model.py
+++ b/scheduler/tests/test_task_types/test_task_model.py
@@ -10,16 +10,18 @@
from scheduler import settings
from scheduler.models.task import TaskType, Task, TaskArg, TaskKwarg
-from scheduler.queues import get_queue
-from scheduler.tests import jobs
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.queues import perform_job
+from scheduler.redis_models import JobStatus, JobModel
+from scheduler.tests import jobs, test_settings # noqa
from scheduler.tests.testtools import (
task_factory,
taskarg_factory,
- _get_task_job_execution_from_registry,
+ _get_task_scheduled_job_from_registry,
SchedulerBaseCase,
_get_executions,
)
-from scheduler.tools import run_task, create_worker
+from scheduler.helpers.tools import run_task, create_worker
def assert_response_has_msg(response, message):
@@ -29,7 +31,7 @@ def assert_response_has_msg(response, message):
def assert_has_execution_with_status(task, status):
job_list = _get_executions(task)
- job_list = [(j.id, j.get_status()) for j in job_list]
+ job_list = [(j.name, j.get_status(connection=task.rqueue.connection)) for j in job_list]
for job in job_list:
if job[1] == status:
return
@@ -39,6 +41,7 @@ def assert_has_execution_with_status(task, status):
class BaseTestCases:
class TestBaseTask(SchedulerBaseCase):
task_type = None
+ queue_name = settings.get_queue_names()[0]
def test_callable_func(self):
task = task_factory(self.task_type)
@@ -64,7 +67,7 @@ def test_clean_callable_invalid(self):
task.clean_callable()
def test_clean_queue(self):
- for queue in settings.QUEUES.keys():
+ for queue in settings.get_queue_names():
task = task_factory(self.task_type)
task.queue = queue
self.assertIsNone(task.clean_queue())
@@ -79,13 +82,13 @@ def test_clean_queue_invalid(self):
# next 2 check the above are included in job.clean() function
def test_clean_base(self):
task = task_factory(self.task_type)
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_job"
self.assertIsNone(task.clean())
def test_clean_invalid_callable(self):
task = task_factory(self.task_type)
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.callable = "scheduler.tests.jobs.test_non_callable"
with self.assertRaises(ValidationError):
task.clean()
@@ -112,47 +115,47 @@ def test_schedule(self):
self.task_type,
)
self.assertTrue(task.is_scheduled())
- self.assertIsNotNone(task.job_id)
+ self.assertIsNotNone(task.job_name)
def test_unschedulable(self):
task = task_factory(self.task_type, enabled=False)
self.assertFalse(task.is_scheduled())
- self.assertIsNone(task.job_id)
+ self.assertIsNone(task.job_name)
def test_unschedule(self):
task = task_factory(self.task_type)
self.assertTrue(task.unschedule())
- self.assertIsNone(task.job_id)
+ self.assertIsNone(task.job_name)
def test_unschedule_not_scheduled(self):
task = task_factory(self.task_type, enabled=False)
self.assertTrue(task.unschedule())
- self.assertIsNone(task.job_id)
+ self.assertIsNone(task.job_name)
def test_save_enabled(self):
task = task_factory(self.task_type)
- self.assertIsNotNone(task.job_id)
+ self.assertIsNotNone(task.job_name)
def test_save_disabled(self):
task = task_factory(self.task_type, enabled=False)
task.save()
- self.assertIsNone(task.job_id)
+ self.assertIsNone(task.job_name)
def test_save_and_schedule(self):
task = task_factory(self.task_type)
- self.assertIsNotNone(task.job_id)
+ self.assertIsNotNone(task.job_name)
self.assertTrue(task.is_scheduled())
def test_schedule2(self):
task = task_factory(self.task_type)
- task.queue = list(settings.QUEUES)[0]
+ task.queue = self.queue_name
task.enabled = False
task.scheduled_time = timezone.now() + timedelta(minutes=1)
self.assertFalse(task._schedule())
def test_delete_and_unschedule(self):
task = task_factory(self.task_type)
- self.assertIsNotNone(task.job_id)
+ self.assertIsNotNone(task.job_name)
self.assertTrue(task.is_scheduled())
task.delete()
self.assertFalse(task.is_scheduled())
@@ -169,32 +172,34 @@ def test_str(self):
def test_callable_passthrough(self):
task = task_factory(self.task_type)
- entry = _get_task_job_execution_from_registry(task)
+ entry = _get_task_scheduled_job_from_registry(task)
self.assertEqual(entry.func, run_task)
- job_model, job_id = entry.args
+ job_model, task_id = entry.args
self.assertEqual(job_model, self.task_type.value)
- self.assertEqual(job_id, task.id)
+ self.assertEqual(task_id, task.id)
def test_timeout_passthrough(self):
task = task_factory(self.task_type, timeout=500)
- entry = _get_task_job_execution_from_registry(task)
+ entry = _get_task_scheduled_job_from_registry(task)
self.assertEqual(entry.timeout, 500)
def test_at_front_passthrough(self):
task = task_factory(self.task_type, at_front=True)
queue = task.rqueue
- jobs_to_schedule = queue.scheduled_job_registry.get_job_ids()
- self.assertIn(task.job_id, jobs_to_schedule)
+ jobs_to_schedule = queue.scheduled_job_registry.all()
+ self.assertIn(task.job_name, jobs_to_schedule)
def test_callable_result(self):
task = task_factory(self.task_type)
- entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(entry.perform(), 2)
+ entry = _get_task_scheduled_job_from_registry(task)
+ queue = get_queue("default")
+ self.assertEqual(perform_job(entry, connection=queue.connection), 2)
def test_callable_empty_args_and_kwargs(self):
task = task_factory(self.task_type, callable="scheduler.tests.jobs.test_args_kwargs")
- entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(entry.perform(), "test_args_kwargs()")
+ entry = _get_task_scheduled_job_from_registry(task)
+ queue = get_queue("default")
+ self.assertEqual(perform_job(entry, connection=queue.connection), "test_args_kwargs()")
def test_delete_args(self):
task = task_factory(self.task_type)
@@ -238,8 +243,12 @@ def test_callable_args_and_kwargs(self):
taskarg_factory(TaskKwarg, key="key2", arg_type="datetime", val=date, content_object=task)
taskarg_factory(TaskKwarg, key="key3", arg_type="bool", val=False, content_object=task)
task.save()
- entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(entry.perform(), "test_args_kwargs('one', key1=2, key2={}, key3=False)".format(date))
+ entry = _get_task_scheduled_job_from_registry(task)
+ queue = get_queue("default")
+ self.assertEqual(
+ perform_job(entry, connection=queue.connection),
+ "test_args_kwargs('one', key1=2, key2={}, key3=False)".format(date),
+ )
def test_function_string(self):
task = task_factory(self.task_type)
@@ -310,7 +319,9 @@ def test_admin_run_job_now_enqueues_job_at(self):
self.assertEqual(302, res.status_code)
task.refresh_from_db()
queue = get_queue(task.queue)
- self.assertIn(task.job_id, queue.get_job_ids())
+ assert_has_execution_with_status(task, JobStatus.QUEUED)
+ self.assertIn(task.job_name, queue.scheduled_job_registry.all())
+
def test_admin_change_view(self):
# arrange
@@ -351,7 +362,9 @@ def test_admin_enqueue_job_now(self):
# arrange
self.client.login(username="admin", password="admin")
task = task_factory(self.task_type)
- self.assertIsNotNone(task.job_id)
+ self.assertIsNotNone(task.job_name)
+ job = JobModel.get(task.job_name, connection=task.rqueue.connection)
+ self.assertEqual(job.status, JobStatus.SCHEDULED)
self.assertTrue(task.is_scheduled())
data = {
"action": "enqueue_job_now",
@@ -359,30 +372,26 @@ def test_admin_enqueue_job_now(self):
task.id,
],
}
- model = task._meta.model.__name__.lower()
- url = reverse(f"admin:scheduler_{model}_changelist")
+ url = reverse("admin:scheduler_task_changelist")
# act
res = self.client.post(url, data=data, follow=True)
# assert part 1
self.assertEqual(200, res.status_code)
- entry = _get_task_job_execution_from_registry(task)
- task_model, scheduled_task_id = entry.args
- self.assertEqual(task_model, task.task_type)
+ assert_has_execution_with_status(task, JobStatus.QUEUED)
+ entry = _get_task_scheduled_job_from_registry(task)
+ task_type, scheduled_task_id = entry.args
+ self.assertEqual(task_type, task.task_type)
self.assertEqual(scheduled_task_id, task.id)
- self.assertEqual("scheduled", entry.get_status())
- assert_has_execution_with_status(task, "queued")
+ self.assertEqual(JobStatus.SCHEDULED, entry.get_status(connection=task.rqueue.connection))
# act 2
- worker = create_worker(
- "default",
- fork_job_execution=False,
- )
- worker.work(burst=True)
+ worker = create_worker("default", fork_job_execution=False, burst=True)
+ worker.work()
# assert 2
- entry = _get_task_job_execution_from_registry(task)
- self.assertEqual(task_model, task.task_type)
+ entry = _get_task_scheduled_job_from_registry(task)
+ self.assertEqual(task_type, task.task_type)
self.assertEqual(scheduled_task_id, task.id)
assert_has_execution_with_status(task, "finished")
@@ -390,7 +399,7 @@ def test_admin_enable_job(self):
# arrange
self.client.login(username="admin", password="admin")
task = task_factory(self.task_type, enabled=False)
- self.assertIsNone(task.job_id)
+ self.assertIsNone(task.job_name)
self.assertFalse(task.is_scheduled())
data = {
"action": "enable_selected",
@@ -436,28 +445,18 @@ def test_admin_single_delete(self):
# arrange
self.client.login(username="admin", password="admin")
prev_count = Task.objects.filter(task_type=self.task_type).count()
- task = task_factory(
- self.task_type,
- )
- self.assertIsNotNone(task.job_id)
+ task = task_factory(self.task_type)
+ self.assertIsNotNone(task.job_name)
self.assertTrue(task.is_scheduled())
- prev = len(_get_executions(task))
- model = task._meta.model.__name__.lower()
- url = reverse(
- f"admin:scheduler_{model}_delete",
- args=[
- task.pk,
- ],
- )
- data = {
- "post": "yes",
- }
+ prev_executions_count = len(_get_executions(task))
+ url = reverse("admin:scheduler_task_delete", args=[task.pk])
+        data = {"post": "yes"}
# act
res = self.client.post(url, data=data, follow=True)
# assert
self.assertEqual(200, res.status_code)
self.assertEqual(prev_count, Task.objects.filter(task_type=self.task_type).count())
- self.assertEqual(prev - 1, len(_get_executions(task)))
+ self.assertEqual(prev_executions_count - 1, len(_get_executions(task)))
def test_admin_delete_selected(self):
# arrange
@@ -465,9 +464,9 @@ def test_admin_delete_selected(self):
task = task_factory(self.task_type, enabled=True)
task.save()
queue = get_queue(task.queue)
- scheduled_jobs = queue.scheduled_job_registry.get_job_ids()
- job_id = task.job_id
- self.assertIn(job_id, scheduled_jobs)
+ scheduled_jobs = queue.scheduled_job_registry.all()
+ job_name = task.job_name
+ self.assertIn(job_name, scheduled_jobs)
data = {
"action": "delete_selected",
"_selected_action": [
@@ -483,8 +482,8 @@ def test_admin_delete_selected(self):
self.assertEqual(200, res.status_code)
assert_response_has_msg(res, "Successfully deleted 1 task.")
self.assertIsNone(Task.objects.filter(task_type=self.task_type).filter(id=task.id).first())
- scheduled_jobs = queue.scheduled_job_registry.get_job_ids()
- self.assertNotIn(job_id, scheduled_jobs)
+ scheduled_jobs = queue.scheduled_job_registry.all()
+ self.assertNotIn(job_name, scheduled_jobs)
class TestSchedulableTask(TestBaseTask):
# Currently ScheduledJob and RepeatableJob
@@ -507,5 +506,5 @@ def test_schedule_time_with_tz(self):
def test_result_ttl_passthrough(self):
job = task_factory(self.task_type, result_ttl=500)
- entry = _get_task_job_execution_from_registry(job)
- self.assertEqual(entry.result_ttl, 500)
+ entry = _get_task_scheduled_job_from_registry(job)
+ self.assertEqual(entry.success_ttl, 500)
diff --git a/scheduler/tests/test_views.py b/scheduler/tests/test_views.py
deleted file mode 100644
index 1c76334..0000000
--- a/scheduler/tests/test_views.py
+++ /dev/null
@@ -1,537 +0,0 @@
-import uuid
-from datetime import datetime
-from unittest.mock import patch, PropertyMock
-
-from django.contrib.auth.models import User
-from django.test import TestCase
-from django.test.client import Client
-from django.urls import reverse
-
-from scheduler.queues import get_queue
-from scheduler.rq_classes import JobExecution, ExecutionStatus
-from scheduler.tests import test_settings # noqa
-from scheduler.tests.jobs import failing_job, long_job, test_job
-from scheduler.tests.testtools import assert_message_in_response, task_factory, _get_task_job_execution_from_registry
-from scheduler.tools import create_worker, TaskType
-
-
-class BaseTestCase(TestCase):
- def setUp(self):
- self.user = User.objects.create_superuser("user", password="pass")
- self.client = Client()
- self.client.login(username=self.user.username, password="pass")
- get_queue("django_tasks_scheduler_test").connection.flushall()
-
-
-class SingleJobActionViewsTest(BaseTestCase):
- def test_single_job_action_unknown_job(self):
- res = self.client.get(reverse("queue_job_action", args=["unknown", "cancel"]), follow=True)
- self.assertEqual(400, res.status_code)
-
- def test_single_job_action_unknown_action(self):
- queue = get_queue("default")
- job = queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
- job.refresh()
- self.assertTrue(job.is_failed)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "unknown"]), follow=True)
- self.assertEqual(404, res.status_code)
-
- def test_single_job_action_requeue_job(self):
- queue = get_queue("default")
- job = queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
- job.refresh()
- self.assertTrue(job.is_failed)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "requeue"]), follow=True)
- self.assertEqual(200, res.status_code)
- self.client.post(reverse("queue_job_action", args=[job.id, "requeue"]), {"requeue": "Requeue"}, follow=True)
- self.assertIn(job, queue.jobs)
- job.delete()
-
- def test_single_job_action_delete_job(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(test_job)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "delete"]), follow=True)
- self.assertEqual(200, res.status_code)
- self.client.post(reverse("queue_job_action", args=[job.id, "delete"]), {"post": "yes"}, follow=True)
- self.assertFalse(JobExecution.exists(job.id, connection=queue.connection))
- self.assertNotIn(job.id, queue.get_job_ids())
-
- def test_single_job_action_cancel_job(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(long_job)
- res = self.client.get(reverse("queue_job_action", args=[job.id, "cancel"]), follow=True)
- self.assertEqual(200, res.status_code)
- res = self.client.post(reverse("queue_job_action", args=[job.id, "cancel"]), {"post": "yes"}, follow=True)
- self.assertEqual(200, res.status_code)
- tmp = JobExecution.fetch(job.id, connection=queue.connection)
- self.assertTrue(tmp.is_canceled)
- self.assertNotIn(job.id, queue.get_job_ids())
-
- def test_single_job_action_cancel_job_that_is_already_cancelled(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(long_job)
- res = self.client.post(reverse("queue_job_action", args=[job.id, "cancel"]), {"post": "yes"}, follow=True)
- self.assertEqual(200, res.status_code)
- tmp = JobExecution.fetch(job.id, connection=queue.connection)
- self.assertTrue(tmp.is_canceled)
- self.assertNotIn(job.id, queue.get_job_ids())
- res = self.client.post(reverse("queue_job_action", args=[job.id, "cancel"]), {"post": "yes"}, follow=True)
- self.assertEqual(200, res.status_code)
- assert_message_in_response(res, f"Could not perform action: Cannot cancel already canceled job: {job.id}")
-
- def test_single_job_action_enqueue_job(self):
- queue = get_queue("django_tasks_scheduler_test")
- job_list = []
- # enqueue some jobs that depends on other
- previous_job = None
- for _ in range(0, 3):
- job = queue.enqueue(test_job, depends_on=previous_job)
- job_list.append(job)
- previous_job = job
-
- # This job is deferred
-
- self.assertEqual(job_list[-1].get_status(), ExecutionStatus.DEFERRED)
- self.assertIsNone(job_list[-1].enqueued_at)
-
- # Try to force enqueue last job should do nothing
- res = self.client.get(reverse("queue_job_action", args=[job_list[-1].id, "enqueue"]), follow=True)
- self.assertEqual(200, res.status_code)
- res = self.client.post(reverse("queue_job_action", args=[job_list[-1].id, "enqueue"]), follow=True)
-
- # Check that job is still deferred because it has dependencies (rq 1.14 change)
- self.assertEqual(200, res.status_code)
- tmp = queue.fetch_job(job_list[-1].id)
- self.assertEqual(tmp.get_status(), ExecutionStatus.QUEUED)
- self.assertIsNotNone(tmp.enqueued_at)
-
-
-class JobListActionViewsTest(BaseTestCase):
- def test_job_list_action_delete_jobs__with_bad_next_url(self):
- queue = get_queue("django_tasks_scheduler_test")
-
- # enqueue some jobs
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(test_job)
- job_ids.append(job.id)
-
- # remove those jobs using view
- res = self.client.post(
- reverse(
- "queue_actions",
- args=[
- queue.name,
- ],
- ),
- {
- "action": "delete",
- "job_ids": job_ids,
- "next_url": "bad_url",
- },
- follow=True,
- )
- assert_message_in_response(res, "Bad followup URL")
- # check if jobs are removed
- self.assertEqual(200, res.status_code)
- for job_id in job_ids:
- self.assertFalse(JobExecution.exists(job_id, connection=queue.connection))
- self.assertNotIn(job_id, queue.job_ids)
-
- def test_job_list_action_delete_jobs(self):
- queue = get_queue("django_tasks_scheduler_test")
-
- # enqueue some jobs
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(test_job)
- job_ids.append(job.id)
-
- # remove those jobs using view
- res = self.client.post(
- reverse(
- "queue_actions",
- args=[
- queue.name,
- ],
- ),
- {"action": "delete", "job_ids": job_ids},
- follow=True,
- )
-
- # check if jobs are removed
- self.assertEqual(200, res.status_code)
- for job_id in job_ids:
- self.assertFalse(JobExecution.exists(job_id, connection=queue.connection))
- self.assertNotIn(job_id, queue.job_ids)
-
- def test_job_list_action_requeue_jobs(self):
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- # enqueue some jobs that will fail
- jobs = []
- job_ids = []
- for _ in range(0, 3):
- job = queue.enqueue(failing_job)
- jobs.append(job)
- job_ids.append(job.id)
-
- # do those jobs = fail them
- worker = create_worker("django_tasks_scheduler_test")
- worker.work(burst=True)
-
- # check if all jobs are really failed
- for job in jobs:
- self.assertTrue(job.is_failed)
-
- # re-nqueue failed jobs from failed queue
- self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "requeue", "job_ids": job_ids})
-
- # check if we requeue all failed jobs
- for job in jobs:
- self.assertFalse(job.is_failed)
-
- def test_job_list_action_stop_jobs(self):
- queue_name = "django_tasks_scheduler_test"
- queue = get_queue(queue_name)
-
- # Enqueue some jobs
- job_ids = []
- worker = create_worker("django_tasks_scheduler_test")
- for _ in range(3):
- job = queue.enqueue(test_job)
- job_ids.append(job.id)
- worker.prepare_job_execution(job)
-
- # Check if the jobs are started
- for job_id in job_ids:
- job = JobExecution.fetch(job_id, connection=queue.connection)
- self.assertEqual(job.get_status(), ExecutionStatus.STARTED)
-
- # Stop those jobs using the view
- started_job_registry = queue.started_job_registry
- self.assertEqual(len(started_job_registry), len(job_ids))
- self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "stop", "job_ids": job_ids})
- self.assertEqual(len(started_job_registry), 0)
-
- canceled_job_registry = queue.canceled_job_registry
- self.assertEqual(len(canceled_job_registry), len(job_ids))
-
- for job_id in job_ids:
- self.assertIn(job_id, canceled_job_registry)
-
-
-class QueueRegistryJobsViewTest(BaseTestCase):
- def test_queue_jobs_unknown_registry(self):
- queue_name = "default"
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "unknown"]), follow=True)
- self.assertEqual(404, res.status_code)
-
- def test_queue_jobs_unknown_queue(self):
- res = self.client.get(reverse("queue_registry_jobs", args=["UNKNOWN", "queued"]))
- self.assertEqual(404, res.status_code)
-
- def test_queued_jobs(self):
- """Jobs in queue are displayed properly"""
- queue = get_queue("default")
- job = queue.enqueue(test_job)
- queue_name = "default"
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "queued"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_finished_jobs(self):
- """Ensure that finished jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- job = queue.enqueue(test_job)
- registry = queue.finished_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "finished"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_failed_jobs(self):
- """Ensure that failed jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- # Test that page doesn't fail when FailedJobRegistry is empty
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
- self.assertEqual(res.status_code, 200)
-
- job = queue.enqueue(test_job)
- registry = queue.failed_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_scheduled_jobs(self):
- """Ensure that scheduled jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- # Test that page doesn't fail when ScheduledJobRegistry is empty
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
- self.assertEqual(res.status_code, 200)
-
- job = queue.enqueue_at(datetime.now(), test_job)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_scheduled_jobs_registry_removal(self):
- """Ensure that non-existing job is being deleted from registry by view"""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- registry = queue.scheduled_job_registry
- job = queue.enqueue_at(datetime.now(), test_job)
- self.assertEqual(len(registry), 1)
-
- queue.connection.delete(job.key)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
- self.assertEqual(res.context["jobs"], [])
-
- self.assertEqual(len(registry), 0)
-
- def test_started_jobs(self):
- """Ensure that active jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- job = queue.enqueue(test_job)
- registry = queue.started_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "started"]))
- self.assertEqual(res.context["jobs"], [job])
-
- def test_deferred_jobs(self):
- """Ensure that active jobs page works properly."""
- queue = get_queue("django_tasks_scheduler_test")
- queue_name = "django_tasks_scheduler_test"
-
- job = queue.enqueue(test_job)
- registry = queue.deferred_job_registry
- registry.add(job, 2)
- res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "deferred"]))
- self.assertEqual(res.context["jobs"], [job])
-
-
-class ViewTest(BaseTestCase):
- def test_job_details(self):
- """Job data is displayed properly"""
- queue = get_queue("default")
- job = queue.enqueue(test_job)
-
- url = reverse(
- "job_details",
- args=[
- job.id,
- ],
- )
- res = self.client.get(url)
- self.assertIn("job", res.context)
- self.assertEqual(res.context["job"], job)
-
- # This page shouldn't fail when job.data is corrupt
- queue.connection.hset(job.key, "data", "non-pickleable data")
- res = self.client.get(url)
- self.assertEqual(res.status_code, 200)
- self.assertIn("DeserializationError", res.content.decode())
-
- # Bad job-id should return 404
- url = reverse(
- "job_details",
- args=[
- "bad_job_id",
- ],
- )
- res = self.client.get(url)
- self.assertEqual(400, res.status_code)
-
- def test_scheduled_job_details(self):
- """Job data is displayed properly"""
- scheduled_job = task_factory(TaskType.ONCE, enabled=True)
- job = _get_task_job_execution_from_registry(scheduled_job)
-
- url = reverse(
- "job_details",
- args=[
- job.id,
- ],
- )
- res = self.client.get(url, follow=True)
- self.assertIn("job", res.context)
- self.assertEqual(res.context["job"], job)
-
- def test_job_details_on_deleted_dependency(self):
- """Page doesn't crash even if job.dependency has been deleted"""
- queue = get_queue("default")
-
- job = queue.enqueue(test_job)
- second_job = queue.enqueue(test_job, depends_on=job)
- job.delete()
- url = reverse("job_details", args=[second_job.id])
- res = self.client.get(url)
- self.assertEqual(res.status_code, 200)
- self.assertIn(second_job._dependency_id, res.content.decode())
-
- def test_requeue_all(self):
- """
- Ensure that re-queuing all failed job work properly
- """
- queue = get_queue("default")
- queue_name = "default"
- queue.enqueue(failing_job)
- queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
-
- res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(res.context["total_jobs"], 2)
- # After requeue_all is called, jobs are enqueued
- res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(len(queue), 2)
-
- def test_requeue_all_if_deleted_job(self):
- """
- Ensure that re-queuing all failed job work properly
- """
- queue = get_queue("default")
- queue_name = "default"
- job = queue.enqueue(failing_job)
- queue.enqueue(failing_job)
- worker = create_worker("default")
- worker.work(burst=True)
-
- res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(res.context["total_jobs"], 2)
- job.delete()
-
- # After requeue_all is called, jobs are enqueued
- res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
- self.assertEqual(len(queue), 1)
-
- def test_clear_queue_unknown_registry(self):
- queue_name = "django_tasks_scheduler_test"
- res = self.client.post(reverse("queue_clear", args=[queue_name, "unknown"]), {"post": "yes"})
- self.assertEqual(404, res.status_code)
-
- def test_clear_queue_enqueued(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue(test_job)
- self.client.post(reverse("queue_clear", args=[queue.name, "queued"]), {"post": "yes"})
- self.assertFalse(JobExecution.exists(job.id, connection=queue.connection))
- self.assertNotIn(job.id, queue.job_ids)
-
- def test_clear_queue_scheduled(self):
- queue = get_queue("django_tasks_scheduler_test")
- job = queue.enqueue_at(datetime.now(), test_job)
-
- res = self.client.get(reverse("queue_clear", args=[queue.name, "scheduled"]), follow=True)
- self.assertEqual(200, res.status_code)
- self.assertEqual(
- res.context["jobs"],
- [
- job,
- ],
- )
-
- res = self.client.post(reverse("queue_clear", args=[queue.name, "scheduled"]), {"post": "yes"}, follow=True)
- assert_message_in_response(res, f"You have successfully cleared the scheduled jobs in queue {queue.name}")
- self.assertEqual(200, res.status_code)
- self.assertFalse(JobExecution.exists(job.id, connection=queue.connection))
- self.assertNotIn(job.id, queue.job_ids)
-
- def test_workers_home(self):
- res = self.client.get(reverse("workers_home"))
- prev_workers = res.context["workers"]
- worker1 = create_worker("django_tasks_scheduler_test")
- worker1.register_birth()
- worker2 = create_worker("test3")
- worker2.register_birth()
-
- res = self.client.get(reverse("workers_home"))
- self.assertEqual(res.context["workers"], prev_workers + [worker1, worker2])
-
- def test_queue_workers(self):
- """Worker index page should show workers for a specific queue"""
- queue_name = "django_tasks_scheduler_test"
-
- worker1 = create_worker("django_tasks_scheduler_test")
- worker1.register_birth()
- worker2 = create_worker("test3")
- worker2.register_birth()
-
- res = self.client.get(reverse("queue_workers", args=[queue_name]))
- self.assertEqual(res.context["workers"], [worker1])
-
- def test_worker_details(self):
- """Worker index page should show workers for a specific queue"""
-
- worker = create_worker("django_tasks_scheduler_test", name=uuid.uuid4().hex)
- worker.register_birth()
-
- url = reverse(
- "worker_details",
- args=[
- worker.name,
- ],
- )
- res = self.client.get(url)
- self.assertEqual(res.context["worker"], worker)
-
- def test_worker_details__non_existing_worker(self):
- """Worker index page should show workers for a specific queue"""
-
- worker = create_worker("django_tasks_scheduler_test", name="WORKER")
- worker.register_birth()
-
- res = self.client.get(reverse("worker_details", args=["bad-worker-name"]))
- self.assertEqual(404, res.status_code)
-
- def test_statistics_json_view(self):
- # Override testing SCHEDULER_QUEUES
- queues = {
- "default": {
- "DB": 0,
- "HOST": "localhost",
- "PORT": 6379,
- }
- }
- with patch("scheduler.settings.QUEUES", new_callable=PropertyMock(return_value=queues)):
- res = self.client.get(reverse("queues_home"))
- self.assertEqual(res.status_code, 200)
-
- res = self.client.get(reverse("queues_home_json"))
- self.assertEqual(res.status_code, 200)
-
- # Not staff => return 404
- self.user.is_staff = False
- self.user.save()
-
- res = self.client.get(reverse("queues_home"))
- self.assertEqual(res.status_code, 302)
-
- # 404 code for stats
- res = self.client.get(reverse("queues_home_json"))
- self.assertEqual(res.status_code, 404)
-
- @staticmethod
- def token_validation(token: str) -> bool:
- return token == "valid"
-
- # @patch('scheduler.views.SCHEDULER_CONFIG')
- # def test_statistics_json_view_token(self, configuration):
- # configuration.get.return_value = ViewTest.token_validation
- # self.user.is_staff = False
- # self.user.save()
- # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'valid'})
- # self.assertEqual(res.status_code, 200)
- #
- # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'invalid'})
- # self.assertEqual(res.status_code, 404)
diff --git a/scheduler/tests/test_views/__init__.py b/scheduler/tests/test_views/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/tests/test_views/base.py b/scheduler/tests/test_views/base.py
new file mode 100644
index 0000000..22c2c8b
--- /dev/null
+++ b/scheduler/tests/test_views/base.py
@@ -0,0 +1,14 @@
+from django.contrib.auth.models import User
+from django.test import TestCase
+from django.test.client import Client
+
+from scheduler.helpers.queues import get_queue
+from scheduler.tests import test_settings # noqa
+
+
+class BaseTestCase(TestCase):
+ def setUp(self):
+ self.user = User.objects.create_superuser("user", password="pass")
+ self.client = Client()
+ self.client.login(username=self.user.username, password="pass")
+ get_queue("django_tasks_scheduler_test").connection.flushall()
diff --git a/scheduler/tests/test_views/test_job_details.py b/scheduler/tests/test_views/test_job_details.py
new file mode 100644
index 0000000..7b89de9
--- /dev/null
+++ b/scheduler/tests/test_views/test_job_details.py
@@ -0,0 +1,190 @@
+import uuid
+from datetime import datetime
+from unittest.mock import patch, PropertyMock
+
+from django.urls import reverse
+
+from scheduler.settings_types import QueueConfiguration
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.tools import create_worker, TaskType
+from scheduler.redis_models import JobModel, WorkerModel
+from scheduler.tests import test_settings # noqa
+from scheduler.tests.jobs import failing_job, test_job
+from scheduler.tests.test_task_types.test_task_model import assert_response_has_msg
+from scheduler.tests.test_views.base import BaseTestCase
+from scheduler.tests.testtools import assert_message_in_response, task_factory, _get_task_scheduled_job_from_registry
+
+
+class TestViewJobDetails(BaseTestCase):
+ def test_job_details(self):
+ """Job data is displayed properly"""
+ queue = get_queue("default")
+ job = queue.create_and_enqueue_job(test_job)
+
+ url = reverse("job_details", args=[job.name])
+ res = self.client.get(url)
+ self.assertEqual(200, res.status_code)
+ self.assertIn("job", res.context)
+ self.assertEqual(res.context["job"], job)
+
+ # Bad job-id should return 404
+ url = reverse("job_details", args=["bad_job_name"])
+ res = self.client.get(url, follow=True)
+ self.assertEqual(200, res.status_code)
+ assert_response_has_msg(res, "Job bad_job_name does not exist, maybe its TTL has passed")
+
+ def test_scheduled_job_details(self):
+ """Job data is displayed properly"""
+ scheduled_job = task_factory(TaskType.ONCE, enabled=True)
+ job = _get_task_scheduled_job_from_registry(scheduled_job)
+
+ url = reverse(
+ "job_details",
+ args=[
+ job.name,
+ ],
+ )
+ res = self.client.get(url, follow=True)
+ self.assertIn("job", res.context)
+ self.assertEqual(res.context["job"], job)
+
+ def test_job_details_on_deleted_dependency(self):
+ """Page doesn't crash even if job.dependency has been deleted"""
+ queue = get_queue("default")
+
+ job = queue.create_and_enqueue_job(test_job)
+ second_job = queue.create_and_enqueue_job(test_job)
+ queue.delete_job(job.name)
+ url = reverse("job_details", args=[second_job.name])
+ res = self.client.get(url)
+ self.assertEqual(res.status_code, 200)
+
+ def test_requeue_all(self):
+ """Ensure that re-queuing all failed job work properly"""
+ queue = get_queue("default")
+ queue_name = "default"
+ queue.create_and_enqueue_job(failing_job)
+ queue.create_and_enqueue_job(failing_job)
+ worker = create_worker("default", burst=True)
+ worker.work()
+
+ res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(res.context["total_jobs"], 2)
+ # After requeue_all is called, jobs are enqueued
+ res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(len(queue), 4)
+
+ def test_requeue_all_if_deleted_job(self):
+ """
+ Ensure that re-queuing all failed jobs works properly
+ """
+ queue = get_queue("default")
+ queue_name = "default"
+ job = queue.create_and_enqueue_job(failing_job)
+ queue.create_and_enqueue_job(failing_job)
+ worker = create_worker("default", burst=True)
+ worker.work()
+
+ res = self.client.get(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(res.context["total_jobs"], 2)
+ queue.delete_job(job.name)
+
+ # After requeue_all is called, jobs are enqueued
+ res = self.client.post(reverse("queue_requeue_all", args=[queue_name, "failed"]))
+ self.assertEqual(len(queue.queued_job_registry), 1)
+
+ def test_clear_queue_unknown_registry(self):
+ queue_name = "django_tasks_scheduler_test"
+ res = self.client.post(reverse("queue_clear", args=[queue_name, "unknown"]), {"post": "yes"})
+ self.assertEqual(404, res.status_code)
+
+ def test_clear_queue_enqueued(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.create_and_enqueue_job(test_job)
+ self.client.post(reverse("queue_clear", args=[queue.name, "queued"]), {"post": "yes"})
+ self.assertFalse(JobModel.exists(job.name, connection=queue.connection), f"job {job.name} exists")
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_clear_queue_scheduled(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.create_and_enqueue_job(test_job, when=datetime.now())
+
+ res = self.client.get(reverse("queue_clear", args=[queue.name, "scheduled"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ self.assertEqual(res.context["jobs"], [job])
+
+ res = self.client.post(reverse("queue_clear", args=[queue.name, "scheduled"]), {"post": "yes"}, follow=True)
+ assert_message_in_response(res, f"You have successfully cleared the scheduled jobs in queue {queue.name}")
+ self.assertEqual(200, res.status_code)
+ self.assertFalse(JobModel.exists(job.name, connection=queue.connection))
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_queue_workers(self):
+ """Worker index page should show workers for a specific queue"""
+ queue_name = "django_tasks_scheduler_test"
+
+ worker1 = create_worker(queue_name)
+ worker1.worker_start()
+ worker2 = create_worker("test3")
+ worker2.worker_start()
+
+ res = self.client.get(reverse("queue_workers", args=[queue_name]))
+ worker1_model = WorkerModel.get(worker1.name, connection=worker1.connection)
+ self.assertEqual(res.context["workers"], [worker1_model])
+
+ def test_worker_details(self):
+ """Worker index page should show workers for a specific queue"""
+
+ worker = create_worker("django_tasks_scheduler_test", name=uuid.uuid4().hex)
+ worker.worker_start()
+
+ url = reverse("worker_details", args=[worker.name])
+ res = self.client.get(url)
+ self.assertEqual(res.context["worker"], worker._model)
+
+ def test_worker_details__non_existing_worker(self):
+ """Worker index page should show workers for a specific queue"""
+
+ worker = create_worker("django_tasks_scheduler_test", name="WORKER")
+ worker.worker_start()
+
+ res = self.client.get(reverse("worker_details", args=["bad-worker-name"]))
+ self.assertEqual(404, res.status_code)
+
+ def test_statistics_json_view(self):
+ # Override testing SCHEDULER_QUEUES
+ queues = {
+ "default": QueueConfiguration(DB=0, HOST="localhost", PORT=6379),
+ }
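+ # patching the module-private queue configuration so the views only see the queue above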
+ with patch("scheduler.settings._QUEUES", new_callable=PropertyMock(return_value=queues)):
+ res = self.client.get(reverse("queues_home"))
+ self.assertEqual(res.status_code, 200)
+
+ res = self.client.get(reverse("queues_home_json"))
+ self.assertEqual(res.status_code, 200)
+
+ # Not staff => return 404
+ self.user.is_staff = False
+ self.user.save()
+
+ res = self.client.get(reverse("queues_home"))
+ self.assertEqual(res.status_code, 302)
+
+ # 404 code for stats
+ res = self.client.get(reverse("queues_home_json"))
+ self.assertEqual(res.status_code, 404)
+
+ @staticmethod
+ def token_validation(token: str) -> bool:
+ return token == "valid"
+
+ # @patch('scheduler.views.SCHEDULER_CONFIG')
+ # def test_statistics_json_view_token(self, configuration):
+ # configuration.get.return_value = ViewTest.token_validation
+ # self.user.is_staff = False
+ # self.user.save()
+ # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'valid'})
+ # self.assertEqual(res.status_code, 200)
+ #
+ # res = self.client.get(reverse('queues_home_json'), headers={'Authorization': 'invalid'})
+ # self.assertEqual(res.status_code, 404)
diff --git a/scheduler/tests/test_views/test_queue_actions.py b/scheduler/tests/test_views/test_queue_actions.py
new file mode 100644
index 0000000..54d07c2
--- /dev/null
+++ b/scheduler/tests/test_views/test_queue_actions.py
@@ -0,0 +1,125 @@
+from django.urls import reverse
+
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.tools import create_worker
+from scheduler.redis_models import JobStatus, JobModel
+from scheduler.tests.jobs import failing_job, test_job
+from scheduler.tests.test_views.base import BaseTestCase
+from scheduler.tests.testtools import assert_message_in_response
+
+
+class QueueActionsViewsTest(BaseTestCase):
+ def test_job_list_action_delete_jobs__with_bad_next_url(self):
+ queue = get_queue("django_tasks_scheduler_test")
+
+ # enqueue some jobs
+ job_names = []
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(test_job, job_info_ttl=0)
+ job_names.append(job.name)
+
+ # remove those jobs using view
+ res = self.client.post(
+ reverse(
+ "queue_actions",
+ args=[
+ queue.name,
+ ],
+ ),
+ {
+ "action": "delete",
+ "job_names": job_names,
+ "next_url": "bad_url",
+ },
+ follow=True,
+ )
+ assert_message_in_response(res, "Bad followup URL")
+ # check if jobs are removed
+ self.assertEqual(200, res.status_code)
+ for job_name in job_names:
+ self.assertFalse(JobModel.exists(job_name, connection=queue.connection), f"job {job_name} exists")
+ self.assertNotIn(job_name, queue.queued_job_registry.all())
+
+ def test_job_list_action_delete_jobs(self):
+ queue = get_queue("django_tasks_scheduler_test")
+
+ # enqueue some jobs
+ job_names = []
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(test_job, job_info_ttl=0)
+ job_names.append(job.name)
+
+ # remove those jobs using view
+ res = self.client.post(
+ reverse(
+ "queue_actions",
+ args=[
+ queue.name,
+ ],
+ ),
+ {"action": "delete", "job_names": job_names},
+ follow=True,
+ )
+
+ # check if jobs are removed
+ self.assertEqual(200, res.status_code)
+ for job_name in job_names:
+ self.assertFalse(JobModel.exists(job_name, connection=queue.connection), f"job {job_name} exists")
+ self.assertNotIn(job_name, queue.queued_job_registry.all())
+
+ def test_job_list_action_requeue_jobs(self):
+ queue_name = "django_tasks_scheduler_test"
+ queue = get_queue(queue_name)
+
+ # enqueue some jobs that will fail
+ job_names = []
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(failing_job)
+ job_names.append(job.name)
+
+ # do those jobs = fail them
+ worker = create_worker(queue_name, burst=True)
+ worker.work()
+
+ # check if all jobs are really failed
+ for job_name in job_names:
+ job = JobModel.get(job_name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+
+ # re-enqueue failed jobs from the failed-job registry
+ self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "requeue", "job_names": job_names})
+
+ # check if we requeue all failed jobs
+ for job_name in job_names:
+ job = JobModel.get(job_name, connection=queue.connection)
+ self.assertFalse(job.is_failed)
+
+ def test_job_list_action_stop_jobs__move_to_finished_registry(self):
+ queue_name = "django_tasks_scheduler_test"
+ queue = get_queue(queue_name)
+
+ # Enqueue some jobs
+ job_names = []
+ worker = create_worker(queue_name)
+ worker.bootstrap()
+ for _ in range(3):
+ job = queue.create_and_enqueue_job(test_job)
+ job_names.append(job.name)
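+ # simulate the worker picking the job up, so it lands in the active job registry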
+ worker.worker_before_execution(job, connection=queue.connection)
+ job.prepare_for_execution(worker.name, queue.active_job_registry, connection=queue.connection)
+
+ # Check if the jobs are started
+ for job_name in job_names:
+ job = JobModel.get(job_name, connection=queue.connection)
+ self.assertEqual(job.status, JobStatus.STARTED)
+
+ # Stop those jobs using the view
+ self.assertEqual(len(queue.active_job_registry), len(job_names))
+ self.client.post(reverse("queue_actions", args=[queue_name]), {"action": "stop", "job_names": job_names})
+ self.assertEqual(0, len(queue.active_job_registry))
+
+ self.assertEqual(0, len(queue.canceled_job_registry))
+ self.assertEqual(len(job_names), len(queue.finished_job_registry))
+
+ for job_name in job_names:
+ self.assertIn(job_name, queue.finished_job_registry)
diff --git a/scheduler/tests/test_views/test_queue_job_action.py b/scheduler/tests/test_views/test_queue_job_action.py
new file mode 100644
index 0000000..e305253
--- /dev/null
+++ b/scheduler/tests/test_views/test_queue_job_action.py
@@ -0,0 +1,122 @@
+from django.urls import reverse
+
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.tools import create_worker
+from scheduler.redis_models import JobStatus, JobModel
+from scheduler.tests.jobs import failing_job, long_job, test_job
+from scheduler.tests.testtools import assert_message_in_response
+from .base import BaseTestCase
+from ..test_task_types.test_task_model import assert_response_has_msg
+
+
+class SingleJobActionViewsTest(BaseTestCase):
+ def test_single_job_action_unknown_job(self):
+ res = self.client.get(reverse("queue_job_action", args=["unknown", "cancel"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ assert_response_has_msg(res, "Job unknown does not exist, maybe its TTL has passed")
+
+ def test_single_job_action_unknown_action(self):
+ queue = get_queue("default")
+ job = queue.create_and_enqueue_job(failing_job)
+ worker = create_worker("default", burst=True)
+ worker.work()
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "unknown"]), follow=True)
+ self.assertEqual(400, res.status_code)
+
+ def test_single_job_action_requeue_job(self):
+ queue = get_queue("default")
+ job = queue.create_and_enqueue_job(failing_job)
+ worker = create_worker("default",burst=True)
+ worker.work()
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(job.is_failed)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "requeue"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ self.client.post(reverse("queue_job_action", args=[job.name, "requeue"]), {"requeue": "Requeue"}, follow=True)
+ self.assertIn(job, JobModel.get_many(queue.queued_job_registry.all(), queue.connection))
+ queue.delete_job(job.name)
+
+ def test_single_job_action_delete_job(self):
+ queue = get_queue("default")
+ job = queue.create_and_enqueue_job(test_job, job_info_ttl=0)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "delete"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ self.client.post(reverse("queue_job_action", args=[job.name, "delete"]), {"post": "yes"}, follow=True)
+ self.assertFalse(JobModel.exists(job.name, connection=queue.connection))
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_single_job_action_cancel_job(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.create_and_enqueue_job(long_job)
+ self.assertTrue(job.is_queued)
+ job = JobModel.get(job.name, connection=queue.connection)
+ res = self.client.get(reverse("queue_job_action", args=[job.name, "cancel"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ res = self.client.post(reverse("queue_job_action", args=[job.name, "cancel"]), {"post": "yes"}, follow=True)
+ self.assertEqual(200, res.status_code)
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(job.is_canceled)
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_single_job_action_cancel_job_that_is_already_cancelled(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.create_and_enqueue_job(long_job)
+ res = self.client.post(reverse("queue_job_action", args=[job.name, "cancel"]), {"post": "yes"}, follow=True)
+ self.assertEqual(200, res.status_code)
+ tmp = JobModel.get(job.name, connection=queue.connection)
+ self.assertTrue(tmp.is_canceled)
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+ res = self.client.post(reverse("queue_job_action", args=[job.name, "cancel"]), {"post": "yes"}, follow=True)
+ self.assertEqual(200, res.status_code)
+ assert_message_in_response(res, f"Could not perform action: Cannot cancel already canceled job: {job.name}")
+
+ def test_single_job_action_enqueue_job(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ job_list = []
+ # enqueue some jobs
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(test_job)
+ job_list.append(job)
+
+ # With no dependencies, the last job is queued immediately
+
+ self.assertEqual(job_list[-1].get_status(connection=queue.connection), JobStatus.QUEUED)
+ self.assertIsNotNone(job_list[-1].enqueued_at)
+
+ # Try to force enqueue last job should do nothing
+ res = self.client.get(reverse("queue_job_action", args=[job_list[-1].name, "enqueue"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ res = self.client.post(reverse("queue_job_action", args=[job_list[-1].name, "enqueue"]), follow=True)
+
+ self.assertEqual(200, res.status_code)
+ tmp = JobModel.get(job_list[-1].name, connection=queue.connection)
+ self.assertEqual(tmp.get_status(connection=queue.connection), JobStatus.QUEUED)
+ self.assertIsNotNone(tmp.enqueued_at)
+
+ def test_single_job_action_enqueue_job_sync_queue(self):
+ queue = get_queue("scheduler_scheduler_active_test")
+ job_list = []
+ # enqueue some jobs on a synchronous queue
+ for _ in range(0, 3):
+ job = queue.create_and_enqueue_job(test_job)
+ job_list.append(job)
+
+ # On a synchronous queue the job was executed immediately on enqueue
+
+ self.assertEqual(job_list[-1].get_status(connection=queue.connection), JobStatus.FAILED)
+ self.assertIsNotNone(job_list[-1].enqueued_at)
+
+ # Try to force enqueue last job should do nothing
+ res = self.client.get(reverse("queue_job_action", args=[job_list[-1].name, "enqueue"]), follow=True)
+ self.assertEqual(200, res.status_code)
+ res = self.client.post(reverse("queue_job_action", args=[job_list[-1].name, "enqueue"]), follow=True)
+
+ self.assertEqual(200, res.status_code)
+ tmp = JobModel.get(job_list[-1].name, connection=queue.connection)
+ self.assertEqual(tmp.get_status(connection=queue.connection), JobStatus.FINISHED)
+ self.assertIsNotNone(tmp.enqueued_at)
+
diff --git a/scheduler/tests/test_views/test_queue_registry_jobs.py b/scheduler/tests/test_views/test_queue_registry_jobs.py
new file mode 100644
index 0000000..4b7bfe9
--- /dev/null
+++ b/scheduler/tests/test_views/test_queue_registry_jobs.py
@@ -0,0 +1,92 @@
+import time
+from datetime import datetime
+
+from django.urls import reverse
+
+from scheduler.helpers.queues import get_queue
+from scheduler.tests.jobs import test_job
+from scheduler.tests.test_views.base import BaseTestCase
+
+
+class QueueRegistryJobsViewTest(BaseTestCase):
+ def test_queue_jobs_unknown_registry(self):
+ queue_name = "default"
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "unknown"]), follow=True)
+ self.assertEqual(404, res.status_code)
+
+ def test_queue_jobs_unknown_queue(self):
+ res = self.client.get(reverse("queue_registry_jobs", args=["UNKNOWN", "queued"]))
+ self.assertEqual(404, res.status_code)
+
+ def test_queued_jobs(self):
+ """Jobs in queue are displayed properly"""
+ queue = get_queue("default")
+ job = queue.create_and_enqueue_job(test_job)
+ queue_name = "default"
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "queued"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_finished_jobs(self):
+ """Ensure that finished jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ job = queue.create_and_enqueue_job(test_job)
+ registry = queue.finished_job_registry
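+ # registry.add(connection, job_name, score) - the score is, presumably, the entry's expiry/sort timestamp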
+ registry.add(queue.connection, job.name, time.time() + 2)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "finished"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_failed_jobs(self):
+ """Ensure that failed jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ # Test that page doesn't fail when FailedJobRegistry is empty
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
+ self.assertEqual(res.status_code, 200)
+
+ job = queue.create_and_enqueue_job(test_job)
+ registry = queue.failed_job_registry
+ registry.add(queue.connection, job.name, time.time() + 20)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "failed"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_scheduled_jobs(self):
+ """Ensure that scheduled jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ # Test that page doesn't fail when ScheduledJobRegistry is empty
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
+ self.assertEqual(res.status_code, 200)
+
+ job = queue.create_and_enqueue_job(test_job, when=datetime.now())
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
+ self.assertEqual(res.context["jobs"], [job])
+
+ def test_scheduled_jobs_registry_removal(self):
+ """Ensure that non-existing job is being deleted from registry by view"""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ registry = queue.scheduled_job_registry
+ job = queue.create_and_enqueue_job(test_job, when=datetime.now())
+ self.assertEqual(len(registry), 1)
+
+ queue.delete_job(job.name)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "scheduled"]))
+ self.assertEqual(res.context["jobs"], [])
+
+ self.assertEqual(len(registry), 0)
+
+ def test_started_jobs(self):
+ """Ensure that active jobs page works properly."""
+ queue = get_queue("django_tasks_scheduler_test")
+ queue_name = "django_tasks_scheduler_test"
+
+ job = queue.create_and_enqueue_job(test_job)
+ registry = queue.active_job_registry
+ registry.add(queue.connection, job.name, time.time() + 20)
+ res = self.client.get(reverse("queue_registry_jobs", args=[queue_name, "active"]))
+ self.assertEqual(res.context["jobs"], [job])
diff --git a/scheduler/tests/test_views/test_workers_view.py b/scheduler/tests/test_views/test_workers_view.py
new file mode 100644
index 0000000..b6e8a9c
--- /dev/null
+++ b/scheduler/tests/test_views/test_workers_view.py
@@ -0,0 +1,18 @@
+from django.urls import reverse
+
+from scheduler.helpers.tools import create_worker
+from scheduler.tests import test_settings # noqa
+from scheduler.tests.test_views.base import BaseTestCase
+
+
+class TestViewWorkers(BaseTestCase):
+ def test_workers_home(self):
+ res = self.client.get(reverse("workers_home"))
+ prev_workers = res.context["workers"]
+ worker1 = create_worker("django_tasks_scheduler_test")
+ worker1.worker_start()
+ worker2 = create_worker("test3")
+ worker2.worker_start()
+
+ res = self.client.get(reverse("workers_home"))
+ self.assertEqual(res.context["workers"], prev_workers + [worker1._model, worker2._model])
diff --git a/scheduler/tests/test_worker/__init__.py b/scheduler/tests/test_worker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/tests/test_worker/test_scheduler.py b/scheduler/tests/test_worker/test_scheduler.py
new file mode 100644
index 0000000..4f5de69
--- /dev/null
+++ b/scheduler/tests/test_worker/test_scheduler.py
@@ -0,0 +1,38 @@
+from datetime import timedelta
+
+import time_machine
+from django.utils import timezone
+
+from scheduler.helpers.tools import create_worker
+from scheduler.models.task import TaskType
+from scheduler.tests.testtools import SchedulerBaseCase, task_factory
+from scheduler.worker.scheduler import WorkerScheduler
+
+
+class TestWorkerScheduler(SchedulerBaseCase):
+ def test_create_worker_with_scheduler__scheduler_started(self):
+ worker = create_worker("default", name="test", burst=True, with_scheduler=True)
+ worker.bootstrap()
+ self.assertIsNotNone(worker.scheduler)
+ worker.stop_scheduler()
+ self.assertIsNone(worker.scheduler)
+
+ def test_scheduler_schedules_tasks(self):
+ with time_machine.travel(0.0, tick=False) as traveller:
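+ # clock frozen at t=0; the task below is scheduled 40ms in the future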
+ # arrange
+ task = task_factory(TaskType.ONCE, scheduled_time=timezone.now() + timedelta(milliseconds=40))
+ self.assertIsNotNone(task.job_name)
+ self.assertNotIn(task.job_name, task.rqueue.queued_job_registry)
+ self.assertIn(task.job_name, task.rqueue.scheduled_job_registry)
+
+ scheduler = WorkerScheduler([task.rqueue], worker_name="fake-worker", connection=task.rqueue.connection)
+
+ # act
+ traveller.move_to(50)
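+ # with the clock at t=50s the job is due: take the scheduler locks and move it to the queue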
+ scheduler._acquire_locks()
+ scheduler.enqueue_scheduled_jobs()
+
+ # assert
+ self.assertIsNotNone(task.job_name)
+ self.assertIn(task.job_name, task.rqueue.queued_job_registry)
+ self.assertNotIn(task.job_name, task.rqueue.scheduled_job_registry)
diff --git a/scheduler/tests/test_worker/test_worker_commands.py b/scheduler/tests/test_worker/test_worker_commands.py
new file mode 100644
index 0000000..5287b61
--- /dev/null
+++ b/scheduler/tests/test_worker/test_worker_commands.py
@@ -0,0 +1,84 @@
+from time import sleep
+
+from django.test import tag
+
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import JobStatus, JobModel, WorkerModel
+from scheduler.tests.jobs import long_job, two_seconds_job
+from .. import testtools
+from ..test_views.base import BaseTestCase
+from ...worker.commands import KillWorkerCommand, send_command, StopJobCommand
+
+
+@tag("multiprocess")
+class WorkerCommandsTest(BaseTestCase):
+ def test_kill_job_command__current_job(self):
+ # Arrange
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.create_and_enqueue_job(long_job)
+ self.assertTrue(job.is_queued)
+ process, worker_name = testtools.run_worker_in_process("django_tasks_scheduler_test")
+ sleep(0.1)
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertEqual(JobStatus.STARTED, job.status)
+
+ # Act
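+ # ask the worker, addressed by name, to stop the job it is currently executing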
+ send_command(queue.connection, StopJobCommand(worker_name=worker_name, job_name=job.name))
+
+ # Assert
+
+ process.terminate()
+ process.join(2)
+ process.kill()
+
+ job = JobModel.get(job.name, connection=queue.connection)
+ worker_model = WorkerModel.get(worker_name, connection=queue.connection)
+ self.assertEqual(job.name, worker_model.stopped_job_name)
+ self.assertEqual(job.name, worker_model.current_job_name)
+ self.assertEqual(0, worker_model.completed_jobs)
+ self.assertEqual(0, worker_model.failed_job_count)
+ self.assertEqual(0, worker_model.successful_job_count)
+ self.assertEqual(JobStatus.STOPPED, job.status)
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+
+ def test_kill_job_command__different_job(self):
+ # Arrange
+ queue = get_queue("django_tasks_scheduler_test")
+ job = queue.create_and_enqueue_job(two_seconds_job)
+ self.assertTrue(job.is_queued)
+ process, worker_name = testtools.run_worker_in_process("django_tasks_scheduler_test")
+ sleep(0.2)
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertEqual(JobStatus.STARTED, job.status)
+
+ # Act
+ send_command(queue.connection, StopJobCommand(worker_name=worker_name, job_name=job.name + "1"))
+ sleep(0.1)
+ process.kill()
+ process.join()
+ # Assert
+ job = JobModel.get(job.name, connection=queue.connection)
+ self.assertEqual(JobStatus.STARTED, job.status)
+ self.assertNotIn(job.name, queue.queued_job_registry.all())
+ worker_model = WorkerModel.get(worker_name, connection=queue.connection)
+ self.assertEqual(0, worker_model.completed_jobs)
+ self.assertEqual(0, worker_model.failed_job_count)
+ self.assertEqual(0, worker_model.successful_job_count)
+ self.assertIsNone(worker_model.stopped_job_name)
+ self.assertEqual(job.name, worker_model.current_job_name)
+
+ def test_kill_worker_command(self):
+ queue = get_queue("django_tasks_scheduler_test")
+ process, worker_name = testtools.run_worker_in_process("django_tasks_scheduler_test")
+ sleep(0.1)
+ # act
+ send_command(queue.connection, KillWorkerCommand(worker_name=worker_name))
+ # assert
+ sleep(0.2)
+ process.kill()
+ process.join()
+ worker_model = WorkerModel.get(worker_name, connection=queue.connection)
+ self.assertEqual(0, worker_model.completed_jobs)
+ self.assertEqual(0, worker_model.failed_job_count)
+ self.assertEqual(0, worker_model.successful_job_count)
+ self.assertIsNotNone(worker_model.shutdown_requested_date)
diff --git a/scheduler/tests/test_worker.py b/scheduler/tests/test_worker/test_worker_creation.py
similarity index 60%
rename from scheduler/tests/test_worker.py
rename to scheduler/tests/test_worker/test_worker_creation.py
index 4b40bfb..3fed556 100644
--- a/scheduler/tests/test_worker.py
+++ b/scheduler/tests/test_worker/test_worker_creation.py
@@ -1,26 +1,25 @@
import os
import uuid
-from rq.job import Job
-from scheduler.rq_classes import JobExecution
+from scheduler import settings
+from scheduler.helpers.queues.getters import QueueConnectionDiscrepancyError
+from scheduler.helpers.tools import create_worker
+from scheduler.tests import test_settings # noqa
from scheduler.tests.testtools import SchedulerBaseCase
-from scheduler.tools import create_worker
-from . import test_settings # noqa
-from .. import settings
class TestWorker(SchedulerBaseCase):
def test_create_worker__two_workers_same_queue(self):
worker1 = create_worker("default", "django_tasks_scheduler_test")
- worker1.register_birth()
+ worker1.worker_start()
worker2 = create_worker("default")
- worker2.register_birth()
+ worker2.worker_start()
hostname = os.uname()[1]
self.assertEqual(f"{hostname}-worker.1", worker1.name)
self.assertEqual(f"{hostname}-worker.2", worker2.name)
def test_create_worker__worker_with_queues_different_connection(self):
- with self.assertRaises(ValueError):
+ with self.assertRaises(QueueConnectionDiscrepancyError):
create_worker("default", "test1")
def test_create_worker__with_name(self):
@@ -36,17 +35,9 @@ def test_create_worker__with_name_containing_slash(self):
def test_create_worker__scheduler_interval(self):
prev = settings.SCHEDULER_CONFIG.SCHEDULER_INTERVAL
settings.SCHEDULER_CONFIG.SCHEDULER_INTERVAL = 1
- worker = create_worker("default")
- worker.work(burst=True)
+ worker = create_worker("default", name="test", burst=True, with_scheduler=True)
+ worker.bootstrap()
+ self.assertEqual(worker.name, "test")
self.assertEqual(worker.scheduler.interval, 1)
settings.SCHEDULER_CONFIG.SCHEDULER_INTERVAL = prev
-
- def test_get_worker_with_custom_job_class(self):
- # Test with string representation of job_class
- worker = create_worker("default", job_class="scheduler.rq_classes.JobExecution")
- self.assertTrue(issubclass(worker.job_class, Job))
- self.assertTrue(issubclass(worker.job_class, JobExecution))
-
- def test_get_worker_without_custom_job_class(self):
- worker = create_worker("default")
- self.assertTrue(issubclass(worker.job_class, JobExecution))
+ worker.teardown()
diff --git a/scheduler/tests/testtools.py b/scheduler/tests/testtools.py
index 2c987b5..26d6464 100644
--- a/scheduler/tests/testtools.py
+++ b/scheduler/tests/testtools.py
@@ -1,4 +1,6 @@
+import multiprocessing
from datetime import timedelta
+from typing import List, Tuple
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
@@ -7,10 +9,28 @@
from django.utils import timezone
from scheduler import settings
+from scheduler.admin.task_admin import job_execution_of
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.tools import create_worker
from scheduler.models.args import TaskKwarg
-from scheduler.models.task import Task
-from scheduler.queues import get_queue
-from scheduler.tools import TaskType
+from scheduler.models.task import Task, TaskType
+from scheduler.redis_models import JobModel
+from scheduler.worker.worker import Worker
+
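+# Use "fork" so worker child processes inherit the test settings and broker connection
+# (with "spawn" the child would re-import settings from scratch)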
+multiprocessing.set_start_method("fork")
+
+
+def _run_worker_process(worker: Worker, **kwargs):
+ worker.work(**kwargs)
+
+
+def run_worker_in_process(*args, name="test-worker") -> Tuple[multiprocessing.Process, str]:
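+    # fork_job_execution=False: the worker runs jobs in-process, so the test manages a single child process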
+ worker = create_worker(*args, name=name, fork_job_execution=False)
+ process = multiprocessing.Process(
+ target=_run_worker_process, args=(worker,), kwargs=dict(with_scheduler=False)
+ )
+ process.start()
+ return process, name
def assert_message_in_response(response, message):
@@ -29,12 +49,11 @@ def sequence_gen():
def task_factory(
- task_type: TaskType, callable_name: str = "scheduler.tests.jobs.test_job", instance_only=False, **kwargs
+ task_type: TaskType, callable_name: str = "scheduler.tests.jobs.test_job", instance_only=False, **kwargs
):
values = dict(
name="Scheduled Job %d" % next(seq),
- job_id=None,
- queue=list(settings.QUEUES.keys())[0],
+ queue=list(settings._QUEUES.keys())[0],
callable=callable_name,
enabled=True,
timeout=None,
@@ -88,17 +107,16 @@ def taskarg_factory(cls, **kwargs):
return instance
-def _get_task_job_execution_from_registry(django_task: Task):
- jobs_to_schedule = django_task.rqueue.scheduled_job_registry.get_job_ids()
- entry = next(i for i in jobs_to_schedule if i == django_task.job_id)
- return django_task.rqueue.fetch_job(entry)
+def _get_task_scheduled_job_from_registry(django_task: Task) -> JobModel:
+ jobs_to_schedule = django_task.rqueue.scheduled_job_registry.all()
+ entry = next(i for i in jobs_to_schedule if i == django_task.job_name)
+ return JobModel.get(entry, connection=django_task.rqueue.connection)
-def _get_executions(django_job: Task):
- job_ids = django_job.rqueue.get_all_job_ids()
- return list(
- filter(lambda j: j.is_execution_of(django_job), map(lambda jid: django_job.rqueue.fetch_job(jid), job_ids))
- )
+def _get_executions(task: Task):
+ job_names = task.rqueue.get_all_job_names()
+ job_list: List[JobModel] = JobModel.get_many(job_names, connection=task.rqueue.connection)
+ return list(filter(lambda j: job_execution_of(j, task), job_list))
class SchedulerBaseCase(TestCase):
@@ -114,15 +132,9 @@ def setUpTestData(cls) -> None:
def setUp(self) -> None:
super(SchedulerBaseCase, self).setUp()
queue = get_queue("default")
- queue.empty()
+ queue.connection.flushall()
def tearDown(self) -> None:
super(SchedulerBaseCase, self).tearDown()
queue = get_queue("default")
queue.empty()
-
- @classmethod
- def setUpClass(cls):
- super(SchedulerBaseCase, cls).setUpClass()
- queue = get_queue("default")
- queue.connection.flushall()
diff --git a/scheduler/timeouts.py b/scheduler/timeouts.py
new file mode 100644
index 0000000..d07f725
--- /dev/null
+++ b/scheduler/timeouts.py
@@ -0,0 +1,111 @@
+import ctypes
+import signal
+import threading
+
+
+class BaseTimeoutException(Exception):
+ """Base exception for timeouts."""
+ pass
+
+
+class JobTimeoutException(BaseTimeoutException):
+ """Raised when a job takes longer to complete than the allowed maximum timeout value."""
+ pass
+
+
+class JobExecutionMonitorTimeoutException(BaseTimeoutException):
+ """Raised when waiting for a job-execution-process exiting takes longer than the maximum timeout value."""
+ pass
+
+
+class BaseDeathPenalty:
+ """Base class to setup job timeouts."""
+
+ def __init__(self, timeout, exception=BaseTimeoutException, **kwargs):
+ self._timeout = timeout
+ self._exception = exception
+
+ def __enter__(self):
+ self.setup_death_penalty()
+
+ def __exit__(self, type, value, traceback):
+ # Always cancel immediately, since we're done
+ try:
+ self.cancel_death_penalty()
+ except BaseTimeoutException:
+ # Weird case: we're done with the with body, but now the alarm is fired. We may safely ignore this
+ # situation and consider the body done.
+ pass
+
+ # __exit__ may return True to suppress further exception handling. We don't want to suppress any exceptions
+ # here, since all errors should just pass through, BaseTimeoutException being handled normally to the invoking
+ # context.
+ return False
+
+ def setup_death_penalty(self):
+ raise NotImplementedError()
+
+ def cancel_death_penalty(self):
+ raise NotImplementedError()
+
+
+class UnixSignalDeathPenalty(BaseDeathPenalty):
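+ # SIGALRM-based timeout: Unix-only, and usable only from the main thread of the main interpreter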
+ def handle_death_penalty(self, signum, frame) -> None:
+ raise self._exception("Task exceeded maximum timeout value ({0} seconds)".format(self._timeout))
+
+ def setup_death_penalty(self) -> None:
+ """Sets up an alarm signal and a signal handler that raises an exception after the timeout amount (expressed
+ in seconds)."""
+ signal.signal(signal.SIGALRM, self.handle_death_penalty)
+ signal.alarm(self._timeout)
+
+ def cancel_death_penalty(self) -> None:
+ """Removes the death penalty alarm and puts back the system into default signal handling."""
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+
+
+class TimerDeathPenalty(BaseDeathPenalty):
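+ # Timer-thread-based timeout for environments where SIGALRM is unavailable (e.g., Windows or non-main threads)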
+ def __init__(self, timeout, exception=JobTimeoutException, **kwargs):
+ super().__init__(timeout, exception, **kwargs)
+ self._target_thread_id = threading.current_thread().ident
+ self._timer = None
+
+ # Monkey-patch exception with the message ahead of time
+ # since PyThreadState_SetAsyncExc can only take a class
+ def init_with_message(self, *args, **kwargs): # noqa
+ super(exception, self).__init__("Task exceeded maximum timeout value ({0} seconds)".format(timeout))
+
+ self._exception.__init__ = init_with_message
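+ # note: this rebinds __init__ on the exception class itself, so every later raise carries the baked-in message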
+
+ def new_timer(self):
+ """Returns a new timer since timers can only be used once."""
+ return threading.Timer(self._timeout, self.handle_death_penalty)
+
+ def handle_death_penalty(self):
+ """Raises an asynchronous exception in another thread.
+
+ Reference http://docs.python.org/c-api/init.html#PyThreadState_SetAsyncExc for more info.
+ """
+ ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(
+ ctypes.c_long(self._target_thread_id), ctypes.py_object(self._exception)
+ )
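+ # PyThreadState_SetAsyncExc returns the number of thread states modified:
+ # 0 means the target thread ID was not found; a value greater than 1 means more than
+ # one state was changed, so the exception must be revoked by calling again with 0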
+ if ret == 0:
+ raise ValueError(f"Invalid thread ID {self._target_thread_id}")
+ elif ret > 1:
+ ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self._target_thread_id), 0)
+ raise SystemError("PyThreadState_SetAsyncExc failed")
+
+ def setup_death_penalty(self):
+ """Starts the timer."""
+ if self._timeout <= 0:
+ return
+ self._timer = self.new_timer()
+ self._timer.start()
+
+ def cancel_death_penalty(self):
+ """Cancels the timer."""
+ if self._timeout <= 0:
+ return
+ self._timer.cancel()
+ self._timer = None
diff --git a/scheduler/tools.py b/scheduler/tools.py
deleted file mode 100644
index c73aae8..0000000
--- a/scheduler/tools.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import importlib
-import os
-from typing import List, Any, Callable, Optional
-
-import croniter
-from django.apps import apps
-from django.db import models
-from django.utils import timezone
-from django.utils.module_loading import import_string
-from django.utils.translation import gettext_lazy as _
-
-from scheduler.queues import get_queues, logger, get_queue
-from scheduler.rq_classes import DjangoWorker, JobExecution, TASK_TYPES, MODEL_NAMES
-from scheduler.settings import SCHEDULER_CONFIG, Broker
-
-
-class TaskType(models.TextChoices):
- CRON = "CronTaskType", _("Cron Task")
- REPEATABLE = "RepeatableTaskType", _("Repeatable Task")
- ONCE = "OnceTaskType", _("Run once")
-
-
-def callable_func(callable_str: str) -> Callable:
- path = callable_str.split(".")
- module = importlib.import_module(".".join(path[:-1]))
- func = getattr(module, path[-1])
- if callable(func) is False:
- raise TypeError("'{}' is not callable".format(callable_str))
- return func
-
-
-def get_next_cron_time(cron_string: Optional[str]) -> Optional[timezone.datetime]:
- """Calculate the next scheduled time by creating a crontab object with a cron string"""
- if cron_string is None:
- return None
- now = timezone.now()
- itr = croniter.croniter(cron_string, now)
- next_itr = itr.get_next(timezone.datetime)
- return next_itr
-
-
-def get_scheduled_task(task_type_str: str, task_id: int) -> "BaseTask": # noqa: F821
- # Try with new model names
- model = apps.get_model(app_label="scheduler", model_name="Task")
- if task_type_str in TASK_TYPES:
- try:
- task_type = TaskType(task_type_str)
- task = model.objects.filter(task_type=task_type, id=task_id).first()
- if task is None:
- raise ValueError(f"Job {task_type}:{task_id} does not exit")
- return task
- except ValueError:
- raise ValueError(f"Invalid task type {task_type_str}")
- elif task_type_str in MODEL_NAMES:
- model = apps.get_model(app_label="scheduler", model_name=task_type_str)
- task = model.objects.filter(id=task_id).first()
- if task is None:
- raise ValueError(f"Job {task_type_str}:{task_id} does not exit")
- return task
- raise ValueError(f"Job Model {task_type_str} does not exist, choices are {TASK_TYPES}")
-
-
-def run_task(task_model: str, task_id: int) -> Any:
- """Run a scheduled job"""
- scheduled_task = get_scheduled_task(task_model, task_id)
- logger.debug(f"Running task {str(scheduled_task)}")
- args = scheduled_task.parse_args()
- kwargs = scheduled_task.parse_kwargs()
- res = scheduled_task.callable_func()(*args, **kwargs)
- return res
-
-
-def _calc_worker_name(existing_worker_names) -> str:
- hostname = os.uname()[1]
- c = 1
- worker_name = f"{hostname}-worker.{c}"
- while worker_name in existing_worker_names:
- c += 1
- worker_name = f"{hostname}-worker.{c}"
- return worker_name
-
-
-def create_worker(*queue_names, **kwargs) -> DjangoWorker:
- """Returns a Django worker for all queues or specified ones."""
-
- queues = get_queues(*queue_names)
- existing_workers = DjangoWorker.all(connection=queues[0].connection)
- existing_worker_names = set(map(lambda w: w.name, existing_workers))
- kwargs.setdefault("fork_job_execution", SCHEDULER_CONFIG.BROKER != Broker.FAKEREDIS)
- if kwargs.get("name", None) is None:
- kwargs["name"] = _calc_worker_name(existing_worker_names)
-
- kwargs["name"] = kwargs["name"].replace("/", ".")
-
- # Handle job_class if provided
- if "job_class" not in kwargs or kwargs["job_class"] is None:
- kwargs["job_class"] = "scheduler.rq_classes.JobExecution"
- try:
- kwargs["job_class"] = import_string(kwargs["job_class"])
- except ImportError:
- raise ImportError(f"Could not import job class {kwargs['job_class']}")
-
- worker = DjangoWorker(queues, connection=queues[0].connection, **kwargs)
- return worker
-
-
-def get_job_executions_for_task(queue_name, scheduled_task) -> List[JobExecution]:
- queue = get_queue(queue_name)
- job_list = queue.get_all_jobs()
- res = list(filter(lambda j: j.is_execution_of(scheduled_task), job_list))
- return res
diff --git a/scheduler/urls.py b/scheduler/urls.py
index 4dfa9b3..b876a97 100644
--- a/scheduler/urls.py
+++ b/scheduler/urls.py
@@ -6,16 +6,16 @@
path("queues/", views.stats, name="queues_home"),
path("queues/stats.json", views.stats_json, name="queues_home_json"),
path("queues//workers/", views.queue_workers, name="queue_workers"),
- path("queues///jobs", views.jobs_view, name="queue_registry_jobs"),
+ path("queues///jobs", views.registry_jobs, name="queue_registry_jobs"),
path("queues///empty/", views.clear_queue_registry, name="queue_clear"),
path("queues///requeue-all/", views.requeue_all, name="queue_requeue_all"),
- path("queues//confirm-action/", views.confirm_action, name="queue_confirm_action"),
- path("queues//actions/", views.actions, name="queue_actions"),
+ path("queues//confirm-action/", views.queue_confirm_action, name="queue_confirm_action"),
+ path("queues//actions/", views.queue_actions, name="queue_actions"),
]
urlpatterns += [
- path("workers/", views.workers, name="workers_home"),
+ path("workers/", views.workers_list, name="workers_home"),
path("workers//", views.worker_details, name="worker_details"),
- path("jobs//", views.job_detail, name="job_details"),
- path("jobs///", views.job_action, name="queue_job_action"),
+ path("jobs//", views.job_detail, name="job_details"),
+ path("jobs///", views.job_action, name="queue_job_action"),
]
diff --git a/scheduler/views.py b/scheduler/views.py
deleted file mode 100644
index 3394764..0000000
--- a/scheduler/views.py
+++ /dev/null
@@ -1,471 +0,0 @@
-from html import escape
-from math import ceil
-from typing import Tuple, Optional
-
-from django.contrib import admin, messages
-from django.contrib.admin.views.decorators import staff_member_required
-from django.core.paginator import Paginator
-from django.http import JsonResponse, HttpResponse, HttpRequest
-from django.http.response import HttpResponseNotFound, Http404, HttpResponseBadRequest
-from django.shortcuts import redirect
-from django.shortcuts import render
-from django.urls import reverse, resolve
-from django.views.decorators.cache import never_cache
-
-from .broker_types import ConnectionErrorTypes, ResponseErrorTypes
-from .queues import get_all_workers, get_connection, QueueNotFoundError
-from .queues import get_queue as get_queue_base
-from .rq_classes import JobExecution, DjangoWorker, DjangoQueue, InvalidJobOperation
-from .settings import SCHEDULER_CONFIG, logger
-
-
-def get_queue(queue_name: str) -> DjangoQueue:
- try:
- return get_queue_base(queue_name)
- except QueueNotFoundError as e:
- logger.error(e)
- raise Http404(e)
-
-
-def get_worker_executions(worker):
- res = list()
- for queue in worker.queues:
- curr_jobs = queue.get_all_jobs()
- curr_jobs = [j for j in curr_jobs if j.worker_name == worker.name]
- res.extend(curr_jobs)
- return res
-
-
-# Create your views here.
-@never_cache
-@staff_member_required
-def stats(request):
- context_data = {**admin.site.each_context(request), **get_statistics(run_maintenance_tasks=True)}
- return render(request, "admin/scheduler/stats.html", context_data)
-
-
-def stats_json(request):
- auth_token = request.headers.get("Authorization")
- token_validation_func = SCHEDULER_CONFIG.TOKEN_VALIDATION_METHOD
- if request.user.is_staff or (token_validation_func and auth_token and token_validation_func(auth_token)):
- return JsonResponse(get_statistics())
-
- return HttpResponseNotFound()
-
-
-def get_statistics(run_maintenance_tasks=False):
- from scheduler.settings import QUEUES
-
- queues = []
- if run_maintenance_tasks:
- workers = get_all_workers()
- for worker in workers:
- worker.clean_registries()
- for queue_name in QUEUES:
- try:
- queue = get_queue(queue_name)
- connection = get_connection(QUEUES[queue_name])
- connection_kwargs = connection.connection_pool.connection_kwargs
-
- if run_maintenance_tasks:
- queue.clean_registries()
-
- # Raw access to the first item from left of the broker list.
- # This might not be accurate since new job can be added from the left
- # with `at_front` parameters.
- # Ideally rq should supports Queue.oldest_job
-
- last_job_id = queue.last_job_id()
- last_job = queue.fetch_job(last_job_id.decode("utf-8")) if last_job_id else None
- if last_job and last_job.enqueued_at:
- oldest_job_timestamp = last_job.enqueued_at.strftime("%Y-%m-%d, %H:%M:%S")
- else:
- oldest_job_timestamp = "-"
-
- # parse_class and connection_pool are not needed and not JSON serializable
- connection_kwargs.pop("parser_class", None)
- connection_kwargs.pop("connection_pool", None)
-
- queue_data = dict(
- name=queue.name,
- jobs=queue.count,
- oldest_job_timestamp=oldest_job_timestamp,
- connection_kwargs=connection_kwargs,
- scheduler_pid=queue.scheduler_pid,
- workers=DjangoWorker.count(queue=queue),
- finished_jobs=len(queue.finished_job_registry),
- started_jobs=len(queue.started_job_registry),
- deferred_jobs=len(queue.deferred_job_registry),
- failed_jobs=len(queue.failed_job_registry),
- scheduled_jobs=len(queue.scheduled_job_registry),
- canceled_jobs=len(queue.canceled_job_registry),
- )
- queues.append(queue_data)
- except ConnectionErrorTypes as e:
- logger.error(f"Could not connect for queue {queue_name}: {e}")
- continue
-
- return {"queues": queues}
-
-
-def _get_registry_job_list(queue, registry, page):
- items_per_page = SCHEDULER_CONFIG.EXECUTIONS_IN_PAGE
- num_jobs = len(registry)
- job_list = []
-
- if num_jobs == 0:
- return job_list, num_jobs, []
-
- last_page = int(ceil(num_jobs / items_per_page))
- page_range = range(1, last_page + 1)
- offset = items_per_page * (page - 1)
- job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)
- job_list = JobExecution.fetch_many(job_ids, connection=queue.connection)
- remove_job_ids = [job_id for i, job_id in enumerate(job_ids) if job_list[i] is None]
- valid_jobs = [job for job in job_list if job is not None]
- if registry is not queue:
- for job_id in remove_job_ids:
- registry.remove(job_id)
-
- return valid_jobs, num_jobs, page_range
-
-
-@never_cache
-@staff_member_required
-def jobs_view(request, queue_name: str, registry_name: str):
- queue = get_queue(queue_name)
- registry = queue.get_registry(registry_name)
- if registry is None:
- return HttpResponseNotFound()
- title = registry_name.capitalize()
- page = int(request.GET.get("page", 1))
- job_list, num_jobs, page_range = _get_registry_job_list(queue, registry, page)
-
- context_data = {
- **admin.site.each_context(request),
- "queue": queue,
- "registry_name": registry_name,
- "registry": registry,
- "jobs": job_list,
- "num_jobs": num_jobs,
- "page": page,
- "page_range": page_range,
- "job_status": title,
- }
- return render(request, "admin/scheduler/jobs.html", context_data)
-
-
-@never_cache
-@staff_member_required
-def queue_workers(request: HttpRequest, queue_name: str) -> HttpResponse:
- queue = get_queue(queue_name)
- all_workers = DjangoWorker.all(queue.connection)
- for w in all_workers:
- w.clean_registries()
- worker_list = [worker for worker in all_workers if queue.name in worker.queue_names()]
-
- context_data = {
- **admin.site.each_context(request),
- "queue": queue,
- "workers": worker_list,
- }
- return render(request, "admin/scheduler/queue_workers.html", context_data)
-
-
-@never_cache
-@staff_member_required
-def workers(request: HttpRequest) -> HttpResponse:
- all_workers = get_all_workers()
- worker_list = [worker for worker in all_workers]
-
- context_data = {
- **admin.site.each_context(request),
- "workers": worker_list,
- }
- return render(request, "admin/scheduler/workers.html", context_data)
-
-
-@never_cache
-@staff_member_required
-def worker_details(request: HttpRequest, name: str) -> HttpResponse:
- queue, worker = None, None
- workers = get_all_workers()
- worker = next((w for w in workers if w.name == name), None)
-
- if worker is None:
- raise Http404(f"Couldn't find worker with this ID: {name}")
- # Convert microseconds to milliseconds
- worker.total_working_time = worker.total_working_time / 1000
-
- execution_list = get_worker_executions(worker)
- paginator = Paginator(execution_list, SCHEDULER_CONFIG.EXECUTIONS_IN_PAGE)
- page_number = request.GET.get("p", 1)
- page_obj = paginator.get_page(page_number)
- page_range = paginator.get_elided_page_range(page_obj.number)
- context_data = {
- **admin.site.each_context(request),
- "queue": queue,
- "worker": worker,
- "queue_names": ", ".join(worker.queue_names()),
- "job": worker.get_current_job(),
- "total_working_time": worker.total_working_time * 1000,
- "executions": page_obj,
- "page_range": page_range,
- "page_var": "p",
- }
- return render(request, "admin/scheduler/worker_details.html", context_data)
-
-
-def _find_job(job_id: str) -> Tuple[Optional[DjangoQueue], Optional[JobExecution]]:
- from scheduler.settings import QUEUES
-
- for queue_name in QUEUES:
- try:
- queue = get_queue(queue_name)
- job = JobExecution.fetch(job_id, connection=queue.connection)
- if job.origin == queue_name:
- return queue, job
- except Exception:
- pass
- return None, None
-
-
-@never_cache
-@staff_member_required
-def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:
- queue, job = _find_job(job_id)
- if job is None:
- return HttpResponseBadRequest(f"Job {escape(job_id)} does not exist, maybe its TTL has passed")
- try:
- job.func_name
- data_is_valid = True
- except Exception:
- data_is_valid = False
-
- try:
- exc_info = job._exc_info
- except AttributeError:
- exc_info = None
-
- context_data = {
- **admin.site.each_context(request),
- "job": job,
- "dependency_id": job._dependency_id,
- "queue": queue,
- "data_is_valid": data_is_valid,
- "exc_info": exc_info,
- }
- return render(request, "admin/scheduler/job_detail.html", context_data)
-
-
-@never_cache
-@staff_member_required
-def clear_queue_registry(request: HttpRequest, queue_name: str, registry_name: str) -> HttpResponse:
- queue = get_queue(queue_name)
- registry = queue.get_registry(registry_name)
- if registry is None:
- return HttpResponseNotFound()
-
- next_url = request.META.get("HTTP_REFERER") or reverse("queue_registry_jobs", args=[queue_name, registry_name])
- if request.method == "POST":
- try:
- if registry is queue:
- queue.empty()
- else:
- job_ids = registry.get_job_ids()
- for job_id in job_ids:
- registry.remove(job_id, delete_job=True)
- messages.info(request, f"You have successfully cleared the {registry_name} jobs in queue {queue.name}")
- except ResponseErrorTypes as e:
- messages.error(
- request,
- f"error: {e}",
- )
- raise e
- return redirect("queue_registry_jobs", queue_name, registry_name)
- job_ids = registry.get_job_ids()
- job_list = JobExecution.fetch_many(job_ids, connection=queue.connection)
- context_data = {
- **admin.site.each_context(request),
- "queue": queue,
- "total_jobs": len(registry),
- "action": "empty",
- "jobs": job_list,
- "next_url": next_url,
- "action_url": reverse(
- "queue_clear",
- args=[
- queue_name,
- registry_name,
- ],
- ),
- }
- return render(request, "admin/scheduler/confirm_action.html", context_data)
-
-
-@never_cache
-@staff_member_required
-def requeue_all(request: HttpRequest, queue_name: str, registry_name: str) -> HttpResponse:
- queue = get_queue(queue_name)
- registry = queue.get_registry(registry_name)
- if registry is None:
- return HttpResponseNotFound()
- next_url = request.META.get("HTTP_REFERER") or reverse("queue_registry_jobs", args=[queue_name, registry_name])
- job_ids = registry.get_job_ids()
- if request.method == "POST":
- count = 0
- # Confirmation received
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
- for job in jobs:
- if job is None:
- continue
- try:
- job.requeue()
- count += 1
- except Exception:
- pass
-
- messages.info(request, f"You have successfully re-queued {count} jobs!")
- return redirect("queue_registry_jobs", queue_name, registry_name)
-
- context_data = {
- **admin.site.each_context(request),
- "queue": queue,
- "total_jobs": len(queue.failed_job_registry),
- "action": "requeue",
- "jobs": [queue.fetch_job(job_id) for job_id in job_ids],
- "next_url": next_url,
- "action_url": reverse("queue_requeue_all", args=[queue_name, registry_name]),
- }
-
- return render(request, "admin/scheduler/confirm_action.html", context_data)
-
-
-@never_cache
-@staff_member_required
-def confirm_action(request: HttpRequest, queue_name: str) -> HttpResponse:
- queue = get_queue(queue_name)
- next_url = request.META.get("HTTP_REFERER") or reverse("queue_registry_jobs", args=[queue_name, "queued"])
- try:
- resolve(next_url)
- except Exception:
- messages.warning(request, "Bad followup URL")
- next_url = reverse("queue_registry_jobs", args=[queue_name, "queued"])
-
- if request.method == "POST" and request.POST.get("action", False):
- # confirm action
- if request.POST.get("_selected_action", False):
- job_id_list = request.POST.getlist("_selected_action")
- context_data = {
- **admin.site.each_context(request),
- "action": request.POST["action"],
- "jobs": [queue.fetch_job(job_id) for job_id in job_id_list],
- "total_jobs": len(job_id_list),
- "queue": queue,
- "next_url": next_url,
- "action_url": reverse(
- "queue_actions",
- args=[
- queue_name,
- ],
- ),
- }
- return render(request, "admin/scheduler/confirm_action.html", context_data)
-
- return redirect(next_url)
-
-
-@never_cache
-@staff_member_required
-def actions(request: HttpRequest, queue_name: str) -> HttpResponse:
- queue = get_queue(queue_name)
- next_url = request.POST.get("next_url") or reverse("queue_registry_jobs", args=[queue_name, "queued"])
- try:
- resolve(next_url)
- except Exception:
- messages.warning(request, "Bad followup URL")
- next_url = reverse("queue_registry_jobs", args=[queue_name, "queued"])
-
- action = request.POST.get("action", False)
- job_ids = request.POST.get("job_ids", False)
- if request.method != "POST" or not action or not job_ids:
- return redirect(next_url)
- job_ids = request.POST.getlist("job_ids")
- if action == "delete":
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
- for job in jobs:
- if job is None:
- continue
- # Remove job id from queue and delete the actual job
- queue.remove_job_id(job.id)
- job.delete()
- messages.info(request, f"You have successfully deleted {len(job_ids)} jobs!")
- elif action == "requeue":
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
- for job in jobs:
- if job is None:
- continue
- job.requeue()
- messages.info(request, f"You have successfully re-queued {len(job_ids)} jobs!")
- elif action == "stop":
- cancelled_jobs = 0
- jobs = JobExecution.fetch_many(job_ids, connection=queue.connection)
- for job in jobs:
- if job is None:
- continue
- try:
- job.stop_execution(queue.connection)
- job.cancel()
- cancelled_jobs += 1
- except Exception as e:
- logger.warning(f"Could not stop job: {e}")
- pass
- messages.info(request, f"You have successfully stopped {cancelled_jobs} jobs!")
- return redirect(next_url)
-
-
-SUPPORTED_JOB_ACTIONS = {"requeue", "delete", "enqueue", "cancel"}
-
-
-@never_cache
-@staff_member_required
-def job_action(request: HttpRequest, job_id: str, action: str) -> HttpResponse:
- queue, job = _find_job(job_id)
- if job is None:
- return HttpResponseBadRequest(f"Job {escape(job_id)} does not exist, maybe its TTL has passed")
- if action not in SUPPORTED_JOB_ACTIONS:
- return HttpResponseNotFound()
-
- if request.method != "POST":
- context_data = {
- **admin.site.each_context(request),
- "job": job,
- "queue": queue,
- "action": action,
- }
- return render(request, "admin/scheduler/single_job_action.html", context_data)
-
- try:
- if action == "requeue":
- job.requeue()
- messages.info(request, f"You have successfully re-queued {job.id}")
- return redirect("job_details", job_id)
- elif action == "delete":
- # Remove job id from queue and delete the actual job
- queue.remove_job_id(job.id)
- job.delete()
- messages.info(request, "You have successfully deleted %s" % job.id)
- return redirect("queue_registry_jobs", queue.name, "queued")
- elif action == "enqueue":
- job.delete(remove_from_queue=False)
- queue._enqueue_job(job)
- messages.info(request, "You have successfully enqueued %s" % job.id)
- return redirect("job_details", job_id)
- elif action == "cancel":
- job.cancel()
- messages.info(request, "You have successfully enqueued %s" % job.id)
- return redirect("job_details", job_id)
- except InvalidJobOperation as e:
- logger.warning(f"Could not perform action: {e}")
- messages.warning(request, f"Could not perform action: {e}")
- return redirect("job_details", job_id)
diff --git a/scheduler/views/__init__.py b/scheduler/views/__init__.py
new file mode 100644
index 0000000..679915c
--- /dev/null
+++ b/scheduler/views/__init__.py
@@ -0,0 +1,20 @@
+__all__ = [
+ "job_detail",
+ "job_action",
+ "stats",
+ "stats_json",
+ "clear_queue_registry",
+ "requeue_all",
+ "queue_confirm_action",
+ "queue_workers",
+ "queue_actions",
+ "registry_jobs",
+ "workers_list",
+ "worker_details",
+ "get_statistics",
+]
+
+from .job_views import job_detail, job_action
+from .queue_views import (stats, stats_json, clear_queue_registry, requeue_all, queue_confirm_action, queue_actions,
+                          queue_workers, registry_jobs, get_statistics)
+from .worker_views import workers_list, worker_details
diff --git a/scheduler/views/helpers.py b/scheduler/views/helpers.py
new file mode 100644
index 0000000..2043951
--- /dev/null
+++ b/scheduler/views/helpers.py
@@ -0,0 +1,35 @@
+from typing import Tuple, Optional
+
+from django.http import Http404
+
+from scheduler.helpers.queues import Queue
+from scheduler.helpers.queues import get_queue as get_queue_base
+from scheduler.redis_models import JobModel
+from scheduler.settings import get_queue_names, logger, QueueNotFoundError
+
+_QUEUES_WITH_BAD_CONFIGURATION = set()
+
+
+def get_queue(queue_name: str) -> Queue:
+ try:
+ return get_queue_base(queue_name)
+ except QueueNotFoundError as e:
+ logger.error(e)
+ raise Http404(e)
+
+
+def _find_job(job_name: str) -> Tuple[Optional[Queue], Optional[JobModel]]:
+ queue_names = get_queue_names()
+ for queue_name in queue_names:
+ if queue_name in _QUEUES_WITH_BAD_CONFIGURATION:
+ continue
+ try:
+ queue = get_queue(queue_name)
+ job = JobModel.get(job_name, connection=queue.connection)
+ if job is not None and job.queue_name == queue_name:
+ return queue, job
+ except Exception as e:
+ _QUEUES_WITH_BAD_CONFIGURATION.add(queue_name)
+ logger.debug(f"Queue {queue_name} added to bad configuration - Got exception: {e}")
+ return None, None
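+
+
+# Behavior sketch (illustrative): _find_job("some-job") scans every configured queue and
+# returns the first (queue, job) pair whose JobModel lives on that queue's connection.
+# Queues whose connection raised an exception are cached in _QUEUES_WITH_BAD_CONFIGURATION
+# and skipped on later lookups, so one misconfigured queue does not slow every call.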
diff --git a/scheduler/views/job_views.py b/scheduler/views/job_views.py
new file mode 100644
index 0000000..30d295a
--- /dev/null
+++ b/scheduler/views/job_views.py
@@ -0,0 +1,95 @@
+from html import escape
+
+from django.contrib import admin, messages
+from django.contrib.admin.views.decorators import staff_member_required
+from django.http import HttpResponse, HttpRequest
+from django.http.response import HttpResponseBadRequest
+from django.shortcuts import render, redirect
+from django.views.decorators.cache import never_cache
+
+from scheduler.helpers.queues import InvalidJobOperation
+from scheduler.redis_models import Result
+from scheduler.settings import logger
+from scheduler.views.helpers import _find_job
+from scheduler.worker.commands import send_command, StopJobCommand
+
+
+@never_cache
+@staff_member_required
+def job_detail(request: HttpRequest, job_name: str) -> HttpResponse:
+ queue, job = _find_job(job_name)
+ if job is None:
+ messages.warning(request, f"Job {escape(job_name)} does not exist, maybe its TTL has passed")
+ return redirect("queues_home")
+ try:
+ job.func_name
+ data_is_valid = True
+ except Exception:
+ data_is_valid = False
+
+ try:
+ last_result = Result.fetch_latest(queue.connection, job.name)
+ except AttributeError:
+ last_result = None
+
+ context_data = {
+ **admin.site.each_context(request),
+ "job": job,
+ "last_result": last_result,
+ "results": Result.all(connection=queue.connection, parent=job.name),
+ "queue": queue,
+ "data_is_valid": data_is_valid,
+ }
+ return render(request, "admin/scheduler/job_detail.html", context_data)
+
+
+SUPPORTED_JOB_ACTIONS = {"requeue", "delete", "enqueue", "cancel"}
+
+
+@never_cache
+@staff_member_required
+def job_action(request: HttpRequest, job_name: str, action: str) -> HttpResponse:
+ queue, job = _find_job(job_name)
+ if job is None:
+ messages.warning(request, f"Job {escape(job_name)} does not exist, maybe its TTL has passed")
+ return redirect("queues_home")
+ if action not in SUPPORTED_JOB_ACTIONS:
+ return HttpResponseBadRequest(f"Action {escape(action)} is not supported")
+
+ if request.method != "POST":
+ context_data = {
+ **admin.site.each_context(request),
+ "job": job,
+ "queue": queue,
+ "action": action,
+ }
+ return render(request, "admin/scheduler/single_job_action.html", context_data)
+
+ try:
+ if action == "requeue":
+ requeued_jobs_count = queue.requeue_jobs(job.name)
+ if requeued_jobs_count == 0:
+ messages.warning(request, f"Could not requeue {job.name}")
+ else:
+ messages.info(request, f"You have successfully re-queued {job.name}")
+ return redirect("job_details", job_name)
+ elif action == "delete":
+ queue.delete_job(job.name)
+ messages.info(request, f"You have successfully deleted {job.name}")
+ return redirect("queue_registry_jobs", queue.name, "queued")
+ elif action == "enqueue":
+ queue.delete_job(job.name, expire_job_model=False)
+ queue.enqueue_job(job)
+ messages.info(request, f"You have successfully enqueued {job.name}")
+ return redirect("job_details", job_name)
+ elif action == "cancel":
+ send_command(
+ connection=queue.connection, command=StopJobCommand(job_name=job.name, worker_name=job.worker_name)
+ )
+ queue.cancel_job(job.name)
+ messages.info(request, f"You have successfully cancelled {job.name}")
+ return redirect("job_details", job_name)
+ except InvalidJobOperation as e:
+ logger.warning(f"Could not perform action: {e}")
+ messages.warning(request, f"Could not perform action: {e}")
+ return redirect("job_details", job_name)
diff --git a/scheduler/views/queue_views.py b/scheduler/views/queue_views.py
new file mode 100644
index 0000000..7796917
--- /dev/null
+++ b/scheduler/views/queue_views.py
@@ -0,0 +1,324 @@
+import dataclasses
+from math import ceil
+from typing import Tuple, List, Dict, Union, Any
+from urllib.parse import urlparse
+
+from django.contrib import admin, messages
+from django.contrib.admin.views.decorators import staff_member_required
+from django.http import HttpResponse, HttpRequest, HttpResponseNotFound, JsonResponse
+from django.shortcuts import render, redirect
+from django.urls import reverse, resolve
+from django.utils.http import url_has_allowed_host_and_scheme
+from django.views.decorators.cache import never_cache
+
+from scheduler.broker_types import ConnectionErrorTypes, ResponseErrorTypes
+from scheduler.helpers.queues import Queue, get_all_workers
+from scheduler.redis_models import JobModel, JobNamesRegistry, WorkerModel
+from scheduler.settings import SCHEDULER_CONFIG, get_queue_names, logger
+from scheduler.views.helpers import get_queue
+from scheduler.worker.commands import StopJobCommand, send_command
+
+
+def _get_registry_job_list(queue: Queue, registry: JobNamesRegistry, page: int) -> Tuple[List[JobModel], int, range]:
+ items_per_page = SCHEDULER_CONFIG.EXECUTIONS_IN_PAGE
+ num_jobs = registry.count(queue.connection)
+ job_list = list()
+
+ if num_jobs == 0:
+ return job_list, num_jobs, range(1, 1)
+
+ last_page = int(ceil(num_jobs / items_per_page))
+ page_range = range(1, last_page + 1)
+ offset = items_per_page * (page - 1)
+ job_names = registry.all(offset, offset + items_per_page - 1)
+ job_list = JobModel.get_many(job_names, connection=queue.connection)
+ remove_job_names = [job_name for i, job_name in enumerate(job_names) if job_list[i] is None]
+ valid_jobs = [job for job in job_list if job is not None]
+ if registry is not queue:
+ for job_name in remove_job_names:
+ registry.delete(queue.connection, job_name)
+
+ return valid_jobs, num_jobs, page_range
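+
+# Pagination sketch (illustrative values): with EXECUTIONS_IN_PAGE = 20 and page = 3,
+# offset = 40 and registry.all(40, 59) is fetched; the bounds are inclusive, hence
+# the "- 1" above.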
+
+
+@never_cache
+@staff_member_required
+def registry_jobs(request: HttpRequest, queue_name: str, registry_name: str) -> HttpResponse:
+ queue = get_queue(queue_name)
+ registry = queue.get_registry(registry_name)
+ if registry is None:
+ return HttpResponseNotFound()
+ title = registry_name.capitalize()
+ page = int(request.GET.get("page", 1))
+ job_list, num_jobs, page_range = _get_registry_job_list(queue, registry, page)
+
+ context_data = {
+ **admin.site.each_context(request),
+ "queue": queue,
+ "registry_name": registry_name,
+ "registry": registry,
+ "jobs": job_list,
+ "num_jobs": num_jobs,
+ "page": page,
+ "page_range": page_range,
+ "job_status": title,
+ }
+ return render(request, "admin/scheduler/jobs.html", context_data)
+
+
+@never_cache
+@staff_member_required
+def queue_workers(request: HttpRequest, queue_name: str) -> HttpResponse:
+ queue = get_queue(queue_name)
+ queue.clean_registries()
+
+ all_workers = WorkerModel.all(queue.connection)
+ worker_list = [worker for worker in all_workers if queue.name in worker.queue_names]
+
+ context_data = {
+ **admin.site.each_context(request),
+ "queue": queue,
+ "workers": worker_list,
+ }
+ return render(request, "admin/scheduler/queue_workers.html", context_data)
+
+
+def stats_json(request: HttpRequest) -> Union[JsonResponse, HttpResponseNotFound]:
+ auth_token = request.headers.get("Authorization")
+ token_validation_func = SCHEDULER_CONFIG.TOKEN_VALIDATION_METHOD
+ if request.user.is_staff or (token_validation_func and auth_token and token_validation_func(auth_token)):
+ return JsonResponse(get_statistics())
+
+ return HttpResponseNotFound()
+
+
+@never_cache
+@staff_member_required
+def stats(request: HttpRequest) -> HttpResponse:
+ context_data = {**admin.site.each_context(request), **get_statistics(run_maintenance_tasks=True)}
+ return render(request, "admin/scheduler/stats.html", context_data)
+
+
+@never_cache
+@staff_member_required
+def clear_queue_registry(request: HttpRequest, queue_name: str, registry_name: str) -> HttpResponse:
+ queue = get_queue(queue_name)
+ registry = queue.get_registry(registry_name)
+ if registry is None:
+ return HttpResponseNotFound()
+ next_url = _check_next_url(request, reverse("queue_registry_jobs", args=[queue_name, registry_name]))
+ if request.method == "POST":
+ try:
+ if registry is queue:
+ queue.empty()
+ elif isinstance(registry, JobNamesRegistry):
+ job_names = registry.all()
+ for job_name in job_names:
+ registry.delete(registry.connection, job_name)
+                    job_model = JobModel.get(job_name, connection=registry.connection)
+                    if job_model is not None:
+                        job_model.delete(connection=registry.connection)
+ messages.info(request, f"You have successfully cleared the {registry_name} jobs in queue {queue.name}")
+ except ResponseErrorTypes as e:
+ messages.error(request, f"error: {e}")
+ raise e
+ return redirect("queue_registry_jobs", queue_name, registry_name)
+ job_names = registry.all()
+ job_list = JobModel.get_many(job_names, connection=queue.connection)
+ context_data = {
+ **admin.site.each_context(request),
+ "queue": queue,
+ "total_jobs": len(registry),
+ "action": "empty",
+ "jobs": job_list,
+ "next_url": next_url,
+ "action_url": reverse(
+ "queue_clear",
+ args=[
+ queue_name,
+ registry_name,
+ ],
+ ),
+ }
+ return render(request, "admin/scheduler/confirm_action.html", context_data)
+
+
+@never_cache
+@staff_member_required
+def requeue_all(request: HttpRequest, queue_name: str, registry_name: str) -> HttpResponse:
+ queue = get_queue(queue_name)
+ registry = queue.get_registry(registry_name)
+ if registry is None:
+ return HttpResponseNotFound()
+ next_url = request.META.get("HTTP_REFERER") or reverse("queue_registry_jobs", args=[queue_name, registry_name])
+ job_names = registry.all()
+ if request.method == "POST":
+ # Confirmation received
+ jobs_requeued_count = queue.requeue_jobs(*job_names)
+ messages.info(request, f"You have successfully re-queued {jobs_requeued_count} jobs!")
+ return redirect("queue_registry_jobs", queue_name, registry_name)
+
+ context_data = {
+ **admin.site.each_context(request),
+ "queue": queue,
+ "total_jobs": queue.count,
+ "action": "requeue",
+ "jobs": [JobModel.get(job_name, connection=queue.connection) for job_name in job_names],
+ "next_url": next_url,
+ "action_url": reverse("queue_requeue_all", args=[queue_name, registry_name]),
+ }
+
+ return render(request, "admin/scheduler/confirm_action.html", context_data)
+
+
+@never_cache
+@staff_member_required
+def queue_confirm_action(request: HttpRequest, queue_name: str) -> HttpResponse:
+ queue = get_queue(queue_name)
+ next_url = _check_next_url(request, reverse("queue_registry_jobs", args=[queue_name, "queued"]))
+ if request.method != "POST":
+ return redirect(next_url)
+ action = request.POST.get("action", None)
+ job_names = request.POST.getlist("_selected_action", None)
+ if action is None or job_names is None:
+ return redirect(next_url)
+
+ # confirm action
+ context_data = {
+ **admin.site.each_context(request),
+ "action": action,
+ "jobs": [JobModel.get(job_name, connection=queue.connection) for job_name in job_names],
+ "total_jobs": len(job_names),
+ "queue": queue,
+ "next_url": next_url,
+ "action_url": reverse(
+ "queue_actions",
+ args=[
+ queue_name,
+ ],
+ ),
+ }
+ return render(request, "admin/scheduler/confirm_action.html", context_data)
+
+_QUEUE_ACTIONS = {"delete", "requeue", "stop"}
+@never_cache
+@staff_member_required
+def queue_actions(request: HttpRequest, queue_name: str) -> HttpResponse:
+ queue = get_queue(queue_name)
+ next_url = _check_next_url(request, reverse("queue_registry_jobs", args=[queue_name, "queued"]))
+ action = request.POST.get("action", False)
+ job_names = request.POST.get("job_names", False)
+ if request.method != "POST" or not action or not job_names:
+ return redirect(next_url)
+ job_names = request.POST.getlist("job_names")
+ if action == "delete":
+ jobs = JobModel.get_many(job_names, connection=queue.connection)
+ for job in jobs:
+ if job is None:
+ continue
+ queue.delete_job(job.name)
+ messages.info(request, f"You have successfully deleted {len(job_names)} jobs!")
+ elif action == "requeue":
+ requeued_jobs_count = queue.requeue_jobs(*job_names)
+ messages.info(request, f"You have successfully re-queued {requeued_jobs_count}/{len(job_names)} jobs!")
+ elif action == "stop":
+ cancelled_jobs = 0
+ jobs = JobModel.get_many(job_names, connection=queue.connection)
+ for job in jobs:
+ if job is None:
+ continue
+ try:
+ command = StopJobCommand(job_name=job.name, worker_name=job.worker_name)
+ send_command(connection=queue.connection, command=command)
+ queue.cancel_job(job.name)
+ cancelled_jobs += 1
+ except Exception as e:
+ logger.warning(f"Could not stop job: {e}")
+ messages.info(request, f"You have successfully stopped {cancelled_jobs} jobs!")
+ return redirect(next_url)
+
+
+@dataclasses.dataclass
+class QueueData:
+ name: str
+ queued_jobs: int
+ oldest_job_timestamp: str
+ connection_kwargs: dict
+ scheduler_pid: int
+ workers: int
+ finished_jobs: int
+ started_jobs: int
+ failed_jobs: int
+ scheduled_jobs: int
+ canceled_jobs: int
+
+
+def get_statistics(run_maintenance_tasks: bool = False) -> Dict[str, List[Dict[str, Any]]]:
+ queue_names = get_queue_names()
+ queues: List[QueueData] = []
+ queue_workers_count: Dict[str, int] = {queue_name: 0 for queue_name in queue_names}
+ workers = get_all_workers()
+ for worker in workers:
+ for queue_name in worker.queue_names:
+ if queue_name not in queue_workers_count:
+ logger.warning(f"Worker {worker.name} ({queue_name}) has no queue")
+ queue_workers_count[queue_name] = 0
+ queue_workers_count[queue_name] += 1
+ for queue_name in queue_names:
+ try:
+ queue = get_queue(queue_name)
+ connection_kwargs = queue.connection.connection_pool.connection_kwargs
+
+ if run_maintenance_tasks:
+ queue.clean_registries()
+
+            # Raw access to the first item from the left of the broker list.
+            # This might not be accurate since new jobs can be added at the front
+            # of the queue with the `at_front` parameter.
+            # Ideally rq should support Queue.oldest_job.
+
+ last_job_name = queue.first_queued_job_name()
+ last_job = JobModel.get(last_job_name, connection=queue.connection) if last_job_name else None
+ if last_job and last_job.enqueued_at:
+ oldest_job_timestamp = last_job.enqueued_at.isoformat()
+ else:
+ oldest_job_timestamp = "-"
+
+ # parse_class and connection_pool are not needed and not JSON serializable
+ connection_kwargs.pop("parser_class", None)
+ connection_kwargs.pop("connection_pool", None)
+
+ queue_data = QueueData(
+ name=queue.name,
+ queued_jobs=len(queue.queued_job_registry),
+ oldest_job_timestamp=oldest_job_timestamp,
+ connection_kwargs=connection_kwargs,
+ scheduler_pid=queue.scheduler_pid,
+ workers=queue_workers_count[queue.name],
+ finished_jobs=len(queue.finished_job_registry),
+ started_jobs=len(queue.active_job_registry),
+ failed_jobs=len(queue.failed_job_registry),
+ scheduled_jobs=len(queue.scheduled_job_registry),
+ canceled_jobs=len(queue.canceled_job_registry),
+ )
+ queues.append(queue_data)
+ except ConnectionErrorTypes as e:
+ logger.error(f"Could not connect for queue {queue_name}: {e}")
+ continue
+
+ return {"queues": [dataclasses.asdict(q) for q in queues]}
+
+
+def _check_next_url(request: HttpRequest, default_next_url: str) -> str:
+    next_url = request.POST.get("next_url", default_next_url)
+    next_url = next_url.replace("\\", "")
+    parsed = urlparse(next_url)
+    if not url_has_allowed_host_and_scheme(next_url, allowed_hosts=None) or parsed.netloc or parsed.scheme:
+        messages.warning(request, "Bad followup URL")
+        next_url = default_next_url
+    try:
+        resolve(next_url)
+    except Exception:
+        messages.warning(request, "Bad followup URL")
+        next_url = default_next_url
+    return next_url
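+
+
+# Examples (illustrative): a relative path such as "/scheduler/queues/default/queued/jobs"
+# that resolves to a known view is returned unchanged; "https://evil.example/x" and
+# "//evil.example/x" carry a scheme or netloc and fall back to default_next_url.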
diff --git a/scheduler/views/worker_views.py b/scheduler/views/worker_views.py
new file mode 100644
index 0000000..3f133e4
--- /dev/null
+++ b/scheduler/views/worker_views.py
@@ -0,0 +1,66 @@
+from typing import List
+
+from django.contrib import admin
+from django.contrib.admin.views.decorators import staff_member_required
+from django.core.paginator import Paginator
+from django.http import HttpResponse, HttpRequest, Http404
+from django.shortcuts import render
+from django.views.decorators.cache import never_cache
+
+from scheduler.helpers.queues import get_all_workers
+from scheduler.redis_models import WorkerModel, JobModel
+from scheduler.settings import SCHEDULER_CONFIG
+from scheduler.views.helpers import get_queue
+
+
+def get_worker_executions(worker: WorkerModel) -> List[JobModel]:
+ res = list()
+ for queue_name in worker.queue_names:
+ queue = get_queue(queue_name)
+ curr_jobs = queue.get_all_jobs()
+ curr_jobs = [j for j in curr_jobs if j.worker_name == worker.name]
+ res.extend(curr_jobs)
+ return res
+
+
+@never_cache
+@staff_member_required
+def worker_details(request: HttpRequest, name: str) -> HttpResponse:
+ workers = get_all_workers()
+ worker = next((w for w in workers if w.name == name), None)
+
+ if worker is None:
+ raise Http404(f"Couldn't find worker with this ID: {name}")
+
+ execution_list = get_worker_executions(worker)
+ paginator = Paginator(execution_list, SCHEDULER_CONFIG.EXECUTIONS_IN_PAGE)
+ page_number = request.GET.get("p", 1)
+ page_obj = paginator.get_page(page_number)
+ page_range = paginator.get_elided_page_range(page_obj.number)
+ current_job = None
+ if worker.current_job_name is not None:
+ queue = get_queue(worker.queue_names[0])
+ current_job = JobModel.get(worker.current_job_name, connection=queue.connection)
+ context_data = {
+ **admin.site.each_context(request),
+ "worker": worker,
+ "queue_names": ", ".join(worker.queue_names),
+ "current_job": current_job,
+ "executions": page_obj,
+ "page_range": page_range,
+ "page_var": "p",
+ }
+ return render(request, "admin/scheduler/worker_details.html", context_data)
+
+
+@never_cache
+@staff_member_required
+def workers_list(request: HttpRequest) -> HttpResponse:
+ all_workers = get_all_workers()
+    worker_list = list(all_workers)
+
+ context_data = {
+ **admin.site.each_context(request),
+ "workers": worker_list,
+ }
+ return render(request, "admin/scheduler/workers_list.html", context_data)
diff --git a/scheduler/worker/__init__.py b/scheduler/worker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scheduler/worker/commands/__init__.py b/scheduler/worker/commands/__init__.py
new file mode 100644
index 0000000..2d259d9
--- /dev/null
+++ b/scheduler/worker/commands/__init__.py
@@ -0,0 +1,13 @@
+__all__ = [
+ "WorkerCommandsChannelListener",
+ "StopJobCommand",
+ "ShutdownCommand",
+ "KillWorkerCommand",
+ "WorkerCommandError",
+ "send_command",
+]
+
+from .kill_worker import KillWorkerCommand
+from .shutdown import ShutdownCommand
+from .stop_job import StopJobCommand
+from .worker_commands import WorkerCommandsChannelListener, WorkerCommandError, send_command
diff --git a/scheduler/worker/commands/kill_worker.py b/scheduler/worker/commands/kill_worker.py
new file mode 100644
index 0000000..2955818
--- /dev/null
+++ b/scheduler/worker/commands/kill_worker.py
@@ -0,0 +1,40 @@
+import errno
+import os
+import signal
+from typing import Optional
+
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models import WorkerModel
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand
+
+
+class KillWorkerCommand(WorkerCommand):
+ """kill-worker command"""
+
+ command_name = "kill-worker"
+
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+ self.worker_pid: Optional[int] = None
+
+ def process_command(self, connection: ConnectionType) -> None:
+ from scheduler.worker.worker import Worker
+
+ logger.info("Received kill-worker command.")
+ worker_model = WorkerModel.get(self.worker_name, connection)
+ self.worker_pid = worker_model.pid
+ if self.worker_pid is None:
+ raise ValueError("Worker PID is not set")
+ logger.info(f"Killing worker main process {self.worker_pid}...")
+ try:
+ Worker.from_model(worker_model).request_stop(signal.SIGTERM, None)
+ os.killpg(os.getpgid(self.worker_pid), signal.SIGTERM)
+ logger.info(f"Killed worker main process pid {self.worker_pid}")
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ logger.debug(
+ f"Worker main process for {self.worker_name}:{self.worker_pid} already dead"
+ ) # "No such process" is fine with us
+ else:
+ raise
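+
+
+# Usage sketch (illustrative; `conn` is a broker connection shared with the worker):
+#     send_command(connection=conn, command=KillWorkerCommand(worker_name="host-worker.1"))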
diff --git a/scheduler/worker/commands/shutdown.py b/scheduler/worker/commands/shutdown.py
new file mode 100644
index 0000000..1cf81be
--- /dev/null
+++ b/scheduler/worker/commands/shutdown.py
@@ -0,0 +1,17 @@
+import os
+import signal
+
+from scheduler.broker_types import ConnectionType
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand
+
+
+class ShutdownCommand(WorkerCommand):
+ """shutdown command"""
+
+ command_name = "shutdown"
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.info("Received shutdown command, sending SIGINT signal.")
+ pid = os.getpid()
+ os.kill(pid, signal.SIGINT)
diff --git a/scheduler/worker/commands/stop_job.py b/scheduler/worker/commands/stop_job.py
new file mode 100644
index 0000000..a4bfefa
--- /dev/null
+++ b/scheduler/worker/commands/stop_job.py
@@ -0,0 +1,57 @@
+import os
+import signal
+from typing import Dict, Any
+
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models import WorkerModel, JobModel
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand, WorkerCommandError
+
+
+class StopJobCommand(WorkerCommand):
+ """stop-job command"""
+
+ command_name = "stop-job"
+
+ def __init__(self, *args, job_name: str, worker_name: str, **kwargs) -> None:
+ super().__init__(*args, worker_name=worker_name, **kwargs)
+ self.job_name = job_name
+ if self.job_name is None:
+ raise WorkerCommandError("job_name for kill-job command is required")
+
+ def command_payload(self) -> Dict[str, Any]:
+ return super().command_payload(job_name=self.job_name)
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.debug(f"Received command to stop job {self.job_name}")
+ worker_model = WorkerModel.get(self.worker_name, connection)
+ job_model = JobModel.get(self.job_name, connection)
+ if worker_model is None:
+ logger.error(f"Worker {self.worker_name} not found")
+ return
+ if job_model is None:
+ logger.error(f"Job {self.job_name} not found")
+ return
+ if worker_model.pid == worker_model.job_execution_process_pid:
+ logger.warning(f"Job execution process ID and worker process id {worker_model.pid} are equal, skipping")
+ return
+ if not worker_model.job_execution_process_pid:
+ logger.error(f"Worker {self.worker_name} has no job execution process")
+ return
+ if worker_model.current_job_name != self.job_name:
+ logger.info(
+ f"{self.worker_name} working on job {worker_model.current_job_name}, "
+ f"not on {self.job_name}, kill-job command ignored."
+ )
+ return
+ worker_model.set_field("stopped_job_name", self.job_name, connection)
+ try:
+ pgid = os.getpgid(worker_model.job_execution_process_pid)
+ logger.debug(
+ f"worker_pid {worker_model.pid}, job_execution_process {worker_model.job_execution_process_pid}")
+ if pgid == worker_model.pid:
+ logger.error("No separate process for job execution, skipping")
+ return
+ os.killpg(pgid, signal.SIGTERM)
+ except ProcessLookupError as e:
+ logger.error(f"Error killing job {self.job_name}: {e}")
diff --git a/scheduler/worker/commands/suspend_worker.py b/scheduler/worker/commands/suspend_worker.py
new file mode 100644
index 0000000..01bb307
--- /dev/null
+++ b/scheduler/worker/commands/suspend_worker.py
@@ -0,0 +1,38 @@
+from scheduler.broker_types import ConnectionType
+from scheduler.redis_models import WorkerModel
+from scheduler.settings import logger
+from scheduler.worker.commands.worker_commands import WorkerCommand
+
+
+class SuspendWorkCommand(WorkerCommand):
+ """Suspend worker command"""
+
+ command_name = "suspend"
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.debug(f"Received command to suspend worker {self.job_name}")
+ worker_model = WorkerModel.get(self.worker_name, connection)
+ if worker_model is None:
+ logger.warning(f"Worker {self.worker_name} not found")
+ if worker_model.is_suspended:
+ logger.warning(f"Worker {self.worker_name} already suspended")
+ return
+ worker_model.set_field("is_suspended", True, connection=connection)
+ logger.info(f"Worker {self.worker_name} suspended")
+
+
+class ResumeWorkCommand(WorkerCommand):
+ """Resume worker command"""
+
+ command_name = "resume"
+
+ def process_command(self, connection: ConnectionType) -> None:
+ logger.debug(f"Received command to resume worker {self.worker_name}")
+ worker_model = WorkerModel.get(self.worker_name, connection)
+        if worker_model is None:
+            logger.warning(f"Worker {self.worker_name} not found")
+            return
+        if not worker_model.is_suspended:
+ logger.warning(f"Worker {self.worker_name} not suspended and therefore can't be resumed")
+ return
+ worker_model.set_field("is_suspended", False, connection=connection)
+ logger.info(f"Worker {self.worker_name} resumed")
diff --git a/scheduler/worker/commands/worker_commands.py b/scheduler/worker/commands/worker_commands.py
new file mode 100644
index 0000000..0344c8d
--- /dev/null
+++ b/scheduler/worker/commands/worker_commands.py
@@ -0,0 +1,95 @@
+import json
+from abc import ABC
+from datetime import datetime, UTC
+from typing import Self, Type, Dict, Any
+
+from scheduler.broker_types import ConnectionType
+from scheduler.settings import logger
+
+_PUBSUB_CHANNEL_TEMPLATE: str = ":workers:pubsub:{}"
+
+
+class WorkerCommandError(Exception):
+ pass
+
+
+class WorkerCommand(ABC):
+ """Abstract class for commands to be sent to a worker and processed by worker"""
+
+ _registry: Dict[str, Type[Self]] = dict()
+ command_name: str = ""
+
+ def __init__(self, *args, worker_name: str, **kwargs) -> None:
+ self.worker_name = worker_name
+
+ def command_payload(self, **kwargs) -> Dict[str, Any]:
+ commands_channel = WorkerCommandsChannelListener._commands_channel(self.worker_name)
+ payload = {
+ "command": self.command_name,
+ "worker_name": self.worker_name,
+ "channel_name": commands_channel,
+ "created_at": datetime.now(tz=UTC).isoformat(),
+ }
+ if kwargs:
+ payload.update(kwargs)
+ return payload
+
+ def __str__(self) -> str:
+ return f"{self.command_name}[{self.command_payload()}]"
+
+ def process_command(self, connection: ConnectionType) -> None:
+ raise NotImplementedError
+
+ @classmethod
+ def __init_subclass__(cls, *args, **kwargs):
+ if cls is WorkerCommand:
+ return
+ if not cls.command_name:
+ raise NotImplementedError(f"{cls.__name__} must have a name attribute")
+ WorkerCommand._registry[cls.command_name] = cls
+
+ @classmethod
+    def from_payload(cls, payload: Dict[str, Any]) -> "WorkerCommand":
+ command_name = payload.get("command")
+ command_class = WorkerCommand._registry.get(command_name)
+ if command_class is None:
+ raise WorkerCommandError(f"Invalid command: {command_name}")
+ return command_class(**payload)
+
+
+def send_command(connection: ConnectionType, command: WorkerCommand) -> None:
+ """Send a command to the worker"""
+ payload = command.command_payload()
+ connection.publish(payload["channel_name"], json.dumps(payload))
+
+
+class WorkerCommandsChannelListener:
+    def __init__(self, connection: ConnectionType, worker_name: str) -> None:
+        self.connection = connection
+        self.pubsub = None
+        self.pubsub_thread = None
+        self.pubsub_channel_name = WorkerCommandsChannelListener._commands_channel(worker_name)
+
+ @staticmethod
+ def _commands_channel(worker_name: str) -> str:
+ return _PUBSUB_CHANNEL_TEMPLATE.format(worker_name)
+
+ def start(self):
+ """Subscribe to this worker's channel"""
+ logger.info(f"Subscribing to channel {self.pubsub_channel_name}")
+ self.pubsub = self.connection.pubsub()
+ self.pubsub.subscribe(**{self.pubsub_channel_name: self.handle_payload})
+ self.pubsub_thread = self.pubsub.run_in_thread(sleep_time=0.2, daemon=True)
+
+ def stop(self):
+ """Unsubscribe from pubsub channel"""
+ if self.pubsub_thread:
+ logger.info(f"Unsubscribing from channel {self.pubsub_channel_name}")
+ self.pubsub_thread.stop()
+ self.pubsub_thread.join()
+ self.pubsub.unsubscribe()
+ self.pubsub.close()
+
+ def handle_payload(self, payload: str) -> None:
+ """Handle commands"""
+ command = WorkerCommand.from_payload(json.loads(payload["data"]))
+ logger.debug(f"Received command: {command}")
+ command.process_command(self.connection)
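+
+
+# End-to-end sketch (illustrative): the worker subscribes to its own channel, and any
+# process holding a connection can then deliver a command to it:
+#
+#     listener = WorkerCommandsChannelListener(conn, "host-worker.1")
+#     listener.start()
+#     # from another process:
+#     send_command(conn, ShutdownCommand(worker_name="host-worker.1"))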
diff --git a/scheduler/worker/scheduler.py b/scheduler/worker/scheduler.py
new file mode 100644
index 0000000..c925967
--- /dev/null
+++ b/scheduler/worker/scheduler.py
@@ -0,0 +1,180 @@
+import os
+import time
+import traceback
+from datetime import datetime
+from enum import Enum
+from threading import Thread
+from typing import List, Set, Optional, Sequence, Dict
+
+import django
+from django.apps import apps
+
+from scheduler.broker_types import ConnectionType, MODEL_NAMES
+from scheduler.helpers.queues import Queue
+from scheduler.helpers.queues import get_queue
+from scheduler.helpers.utils import current_timestamp
+from scheduler.redis_models import SchedulerLock, JobModel, ScheduledJobRegistry
+from scheduler.settings import SCHEDULER_CONFIG, logger
+
+
+class SchedulerStatus(str, Enum):
+ STARTED = "started"
+ WORKING = "working"
+ STOPPED = "stopped"
+
+
+def _reschedule_tasks():
+ for model_name in MODEL_NAMES:
+ model = apps.get_model(app_label="scheduler", model_name=model_name)
+ enabled_jobs = model.objects.filter(enabled=True)
+ for item in enabled_jobs:
+ logger.debug(f"Rescheduling {str(item)}")
+ item.save()
+
+
+class WorkerScheduler:
+ def __init__(
+ self,
+ queues: Sequence[Queue],
+ connection: ConnectionType,
+ worker_name: str,
+ interval: Optional[int] = None,
+ ) -> None:
+ interval = interval or SCHEDULER_CONFIG.SCHEDULER_INTERVAL
+ self._queues = queues
+ self._scheduled_job_registries: List[ScheduledJobRegistry] = []
+ self.lock_acquisition_time = None
+ self._pool_class = connection.connection_pool.connection_class
+ self._pool_kwargs = connection.connection_pool.connection_kwargs.copy()
+ self._locks: Dict[str, SchedulerLock] = dict()
+ self.connection = connection
+ self.interval = interval
+ self._stop_requested = False
+ self._status = SchedulerStatus.STOPPED
+ self._thread = None
+ self._pid: Optional[int] = None
+ self.worker_name = worker_name
+
+ @property
+ def pid(self) -> Optional[int]:
+ return self._pid
+
+ def _should_reacquire_locks(self) -> bool:
+ """Returns True if lock_acquisition_time is longer than 10 minutes ago"""
+ if not self.lock_acquisition_time:
+ return True
+ seconds_since = (datetime.now() - self.lock_acquisition_time).total_seconds()
+ return seconds_since > SCHEDULER_CONFIG.SCHEDULER_FALLBACK_PERIOD_SECS
+
+ def _acquire_locks(self) -> Set[str]:
+ """Returns names of queue it successfully acquires lock on"""
+ successful_locks = set()
+ if self.pid is None:
+ self._pid = os.getpid()
+ queue_names = [queue.name for queue in self._queues]
+ logger.debug(
+ f"""[Scheduler {self.worker_name}/{self.pid}] Trying to acquire locks for {", ".join(queue_names)}""")
+ for queue in self._queues:
+ lock = SchedulerLock(queue.name)
+ if lock.acquire(self.pid, connection=queue.connection, expire=self.interval + 60):
+ self._locks[queue.name] = lock
+ successful_locks.add(queue.name)
+
+ # Always reset _scheduled_job_registries when acquiring locks
+ self.lock_acquisition_time = datetime.now()
+ self._scheduled_job_registries = []
+ for queue_name in self._locks:
+ queue = get_queue(queue_name)
+ self._scheduled_job_registries.append(queue.scheduled_job_registry)
+ logger.debug(f"[Scheduler {self.worker_name}/{self.pid}] Locks acquired for {', '.join(self._locks.keys())}")
+ return successful_locks
+
+ def start(self, burst=False) -> None:
+ locks = self._acquire_locks()
+ if len(locks) == 0:
+ return
+ if burst:
+ self.enqueue_scheduled_jobs()
+ self.release_locks()
+ return
+ self._status = SchedulerStatus.STARTED
+ self._thread = Thread(target=run_scheduler, args=(self,), name="scheduler-thread")
+ self._thread.start()
+
+ def request_stop_and_wait(self):
+ """Toggle self._stop_requested that's checked on every loop"""
+ logger.debug(f"[Scheduler {self.worker_name}/{self.pid}] Stop Scheduler requested")
+ self._stop_requested = True
+ if self._thread is not None:
+ self._thread.join()
+
+ def heartbeat(self):
+ """Updates the TTL on scheduler keys and the locks"""
+ lock_keys = ", ".join(self._locks.keys())
+ logger.debug(f"[Scheduler {self.worker_name}/{self.pid}] Scheduler updating lock for queue {lock_keys}")
+ with self.connection.pipeline() as pipeline:
+ for lock in self._locks.values():
+                lock.expire(pipeline, expire=self.interval + 60)
+ pipeline.execute()
+
+ def stop(self):
+ logger.info(
+ f"[Scheduler {self.worker_name}/{self.pid}] Stopping scheduler, releasing locks for {', '.join(self._locks.keys())}...")
+ self.release_locks()
+ self._status = SchedulerStatus.STOPPED
+
+ def release_locks(self):
+ """Release acquired locks"""
+ with self.connection.pipeline() as pipeline:
+ for lock in self._locks.values():
+                lock.release(pipeline)
+ pipeline.execute()
+
+ def work(self) -> None:
+ queue_names = [queue.name for queue in self._queues]
+ logger.info(
+ f"""[Scheduler {self.worker_name}/{self.pid}] Scheduler for {", ".join(queue_names)} started""")
+ django.setup()
+
+ while True:
+ if self._stop_requested:
+ self.stop()
+ break
+
+ if self._should_reacquire_locks():
+ self._acquire_locks()
+
+ self.enqueue_scheduled_jobs()
+ self.heartbeat()
+ time.sleep(self.interval)
+
+ def enqueue_scheduled_jobs(self) -> None:
+ """Enqueue jobs whose timestamp is in the past"""
+ self._status = SchedulerStatus.WORKING
+ _reschedule_tasks()
+
+ for registry in self._scheduled_job_registries:
+ timestamp = current_timestamp()
+ job_names = registry.get_jobs_to_schedule(timestamp)
+
+ if not job_names:
+ continue
+
+ queue = get_queue(registry.name)
+
+ jobs = JobModel.get_many(job_names, connection=self.connection)
+ with self.connection.pipeline() as pipeline:
+ for job in jobs:
+ if job is not None:
+ queue.enqueue_job(job, connection=pipeline, at_front=bool(job.at_front))
+ pipeline.execute()
+ self._status = SchedulerStatus.STARTED
+
+
+def run_scheduler(scheduler: WorkerScheduler):
+ try:
+ scheduler.work()
+ except: # noqa
+ logger.error(f"Scheduler [PID {os.getpid()}] raised an exception.\n{traceback.format_exc()}")
+ raise
+ logger.info(f"Scheduler with PID {os.getpid()} has stopped")
diff --git a/scheduler/worker/worker.py b/scheduler/worker/worker.py
new file mode 100644
index 0000000..340865b
--- /dev/null
+++ b/scheduler/worker/worker.py
@@ -0,0 +1,902 @@
+import contextlib
+import errno
+import math
+import os
+import random
+import signal
+import socket
+import sys
+import threading
+import time
+import traceback
+import warnings
+from datetime import timedelta
+from enum import Enum
+from random import shuffle
+from resource import struct_rusage
+from types import FrameType
+from typing import List, Optional, Tuple, Any, Iterable, Self
+
+import scheduler
+from scheduler.broker_types import (
+ ConnectionType,
+ TimeoutErrorTypes,
+ ConnectionErrorTypes,
+ WatchErrorTypes,
+ ResponseErrorTypes,
+)
+from scheduler.helpers.queues import get_queue
+from scheduler.redis_models import WorkerModel, JobModel, JobStatus, KvLock, DequeueTimeout
+from scheduler.settings import SCHEDULER_CONFIG, logger
+from .commands import WorkerCommandsChannelListener
+from .scheduler import WorkerScheduler
+from ..redis_models.worker import WorkerStatus
+
+try:
+ from signal import SIGKILL
+except ImportError:
+ from signal import SIGTERM as SIGKILL
+
+from contextlib import suppress
+
+from scheduler.helpers.queues import Queue, perform_job
+from scheduler.timeouts import (
+ JobExecutionMonitorTimeoutException,
+ JobTimeoutException,
+)
+from scheduler.helpers.utils import utcnow, current_timestamp
+
+try:
+ from setproctitle import setproctitle as setprocname
+except ImportError:
+
+ def setprocname(*args, **kwargs): # noqa
+ pass
+
+
+class StopRequested(Exception):
+ pass
+
+
+class WorkerNotFound(Exception):
+ pass
+
+
+_signames = dict(
+ (getattr(signal, signame), signame) for signame in dir(signal) if signame.startswith("SIG") and "_" not in signame
+)
+
+
+def signal_name(signum):
+    try:
+        return signal.Signals(signum).name
+    except (KeyError, ValueError):
+        return "SIG_UNKNOWN"
+
+
+class DequeueStrategy(str, Enum):
+ DEFAULT = "default"
+ ROUND_ROBIN = "round_robin"
+ RANDOM = "random"
+
+
+class QueueLock(KvLock):
+ def __init__(self, queue_name: str) -> None:
+ super().__init__(f"queue:{queue_name}")
+
+
+class Worker:
+ queue_class = Queue
+
+ # factor to increase connection_wait_time in case of continuous connection failures.
+ exponential_backoff_factor = 2.0
+ # Max Wait time (in seconds) after which exponential_backoff_factor won't be applicable.
+ max_connection_wait_time = 60.0
+
+ @classmethod
+ def from_model(cls, model: WorkerModel) -> Self:
+ connection = get_queue(model.queue_names[0]).connection
+ res = cls(
+ queues=[get_queue(queue_name) for queue_name in model.queue_names],
+ name=model.name,
+ connection=connection,
+ with_scheduler=False,
+ model=model,
+ )
+ return res
+
+ def __init__(
+ self,
+ queues,
+ name: str,
+ connection: Optional[ConnectionType] = None,
+ maintenance_interval: int = SCHEDULER_CONFIG.DEFAULT_MAINTENANCE_TASK_INTERVAL,
+ job_monitoring_interval=SCHEDULER_CONFIG.DEFAULT_JOB_MONITORING_INTERVAL,
+ dequeue_strategy: DequeueStrategy = DequeueStrategy.DEFAULT,
+ disable_default_exception_handler: bool = False,
+ fork_job_execution: bool = True,
+ with_scheduler: bool = True,
+ burst: bool = False,
+ model: Optional[WorkerModel] = None,
+ ): # noqa
+ self.fork_job_execution = fork_job_execution
+ self.job_monitoring_interval = job_monitoring_interval
+ self.maintenance_interval = maintenance_interval
+
+ connection = self._set_connection(connection)
+ self.connection = connection
+
+ self.queues = [
+ (Queue(name=q, connection=connection) if isinstance(q, str) else q) for q in _ensure_list(queues)
+ ]
+ self.name: str = name
+ self._validate_name_uniqueness()
+ self._ordered_queues = self.queues[:]
+
+ self._is_job_execution_process: bool = False
+
+ self.scheduler: Optional[WorkerScheduler] = None
+ self._command_listener = WorkerCommandsChannelListener(connection, self.name)
+ self._dequeue_strategy = dequeue_strategy
+
+ self.disable_default_exception_handler = disable_default_exception_handler
+ self.with_scheduler = with_scheduler
+ self.burst = burst
+ self._model = (
+ model
+ if model is not None
+ else WorkerModel(
+ name=self.name,
+ queue_names=[queue.name for queue in self.queues],
+ birth=None,
+ last_heartbeat=None,
+ pid=os.getpid(),
+ hostname=socket.gethostname(),
+ ip_address=_get_ip_address_from_connection(self.connection, self.name),
+ version=scheduler.__version__,
+ python_version=sys.version,
+ state=WorkerStatus.CREATED,
+ )
+ )
+ self._model.save(self.connection)
+
+ @property
+ def _pid(self) -> int:
+ return self._model.pid
+
+ def should_run_maintenance_tasks(self):
+ """Maintenance tasks should run on first startup or every 10 minutes."""
+ if self._model.last_cleaned_at is None:
+ return True
+ if (utcnow() - self._model.last_cleaned_at) > timedelta(seconds=self.maintenance_interval):
+ return True
+ return False
+
+ def _set_connection(self, connection: ConnectionType) -> ConnectionType:
+ """Configures the Broker connection to have a socket timeout.
+ This should timouet the connection in case any specific command hangs at any given time (eg. BLPOP).
+ If the connection provided already has a `socket_timeout` defined, skips.
+
+ :param connection: Broker connection to configure.
+ """
+ current_socket_timeout = connection.connection_pool.connection_kwargs.get("socket_timeout")
+ if current_socket_timeout is None:
+ timeout_config = {"socket_timeout": self.connection_timeout}
+ connection.connection_pool.connection_kwargs.update(timeout_config)
+ return connection
+
+ def clean_registries(self):
+ """Runs maintenance jobs on each Queue's registries."""
+ for queue in self.queues:
+            # If there are multiple workers running, we only want one worker
+            # to run clean_registries() per queue, so the lock is on the queue name.
+            queue_lock = QueueLock(queue.name)
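+            # expire=899 keeps the lock just under 15 minutes, presumably so a crashed worker cannot hold it forever.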
+ if queue_lock.acquire(1, expire=899, connection=self.connection):
+ logger.info(f"[Worker {self.name}/{self._pid}]: Cleaning registries for queue: {queue.name}")
+ queue.clean_registries()
+ WorkerModel.cleanup(queue.connection, queue.name)
+ queue_lock.release(self.connection)
+ self._model.last_cleaned_at = utcnow()
+
+ def _install_signal_handlers(self) -> None:
+ """Installs signal handlers for handling SIGINT and SIGTERM gracefully."""
+ if threading.current_thread() is not threading.main_thread():
+ logger.warning(
+ f"[Worker {self.name}/{self._pid}]: Running in a thread, skipping signal handlers installation"
+ )
+ return
+ signal.signal(signal.SIGINT, self.request_stop)
+ signal.signal(signal.SIGTERM, self.request_stop)
+
+ def work(
+ self,
+ max_jobs: Optional[int] = None,
+ max_idle_time: Optional[int] = None,
+ ) -> bool:
+ """Starts the work loop.
+
+ Pops and performs all jobs on the current list of queues. When all
+ queues are empty, block and wait for new jobs to arrive on any of the
+ queues, unless `burst` mode is enabled.
+        If `max_idle_time` is provided, the worker will stop after being idle for more than the provided number of seconds.
+
+ The return value indicates whether any jobs were processed.
+
+ :param max_jobs: Max number of jobs. Defaults to None.
+ :param max_idle_time: Max seconds for a worker to be idle. Defaults to None.
+ :return: Whether any jobs were processed.
+ """
+ self.bootstrap()
+
+ self._install_signal_handlers()
+ try:
+ while True:
+ self.refresh()
+ self._check_for_suspension(self.burst)
+
+ if self.should_run_maintenance_tasks():
+ self.run_maintenance_tasks()
+
+ if self._model.shutdown_requested_date:
+ logger.info(f"[Worker {self.name}/{self._pid}]: stopping on request")
+ break
+
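+                # In burst mode no blocking timeout is used; otherwise block slightly less than the worker TTL
+                # so the heartbeat below refreshes the worker key before it expires.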
+ timeout = None if self.burst else (SCHEDULER_CONFIG.DEFAULT_WORKER_TTL - 15)
+ job, queue = self.dequeue_job_and_maintain_ttl(timeout, max_idle_time)
+ if job is None:
+ if self.burst:
+ logger.info(f"[Worker {self.name}/{self._pid}]: done, quitting")
+ break
+ elif max_idle_time is not None:
+ logger.info(f"[Worker {self.name}/{self._pid}]: idle for {max_idle_time} seconds, quitting")
+ break
+ continue
+
+ self.execute_job(job, queue)
+
+ self.refresh()
+ with self.connection.pipeline() as pipeline:
+ self._model.heartbeat(pipeline)
+ self._model.save(pipeline)
+ pipeline.execute()
+ if max_jobs is not None and self._model.completed_jobs >= max_jobs:
+ logger.info(
+ f"[Worker {self.name}/{self._pid}]: finished executing {self._model.completed_jobs} jobs, quitting"
+ )
+ break
+ return self._model.completed_jobs > 0
+
+ except TimeoutErrorTypes:
+ logger.error(f"[Worker {self.name}/{self._pid}]: Redis connection timeout, quitting...")
+        except StopRequested:
+            logger.info(f"[Worker {self.name}/{self._pid}]: Worker was requested to stop, quitting")
+ except SystemExit: # Cold shutdown detected
+ raise
+ except Exception:
+ logger.error(f"[Worker {self.name}/{self._pid}]: found an unhandled exception, quitting...", exc_info=True)
+ finally:
+ self.teardown()
+
+ def handle_job_failure(self, job: JobModel, queue: Queue, exc_string=""):
+ """
+ Handles the failure or an executing job by:
+ 1. Setting the job status to failed
+ 2. Removing the job from active_job_registry
+ 3. Setting the workers current job to None
+ 4. Add the job to FailedJobRegistry
+ `save_exc_to_job` should only be used for testing purposes
+ """
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Handling failed execution of job {job.name}")
+ # check whether a job was stopped intentionally and set the job status appropriately if it was this job.
+
+        stopped_job_name = self._model.get_field("stopped_job_name", self.connection)
+        new_job_status = JobStatus.STOPPED if stopped_job_name == job.name else JobStatus.FAILED
+ self._model.current_job_name = None
+ with self.connection.pipeline() as pipeline:
+ if new_job_status == JobStatus.STOPPED:
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Job was stopped, setting status to STOPPED")
+ else:
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Job has failed, setting status to FAILED")
+
+ queue.active_job_registry.delete(connection=pipeline, job_name=job.name)
+
+ if not self.disable_default_exception_handler:
+ queue.job_handle_failure(new_job_status, job, exc_string, connection=pipeline)
+ with suppress(ConnectionErrorTypes):
+ pipeline.execute()
+
+ self._model.current_job_working_time = 0
+ if job.status == JobStatus.FAILED:
+ self._model.failed_job_count += 1
+ self._model.completed_jobs += 1
+        if job.started_at and job.ended_at:
+            self._model.total_working_time_ms += (job.ended_at - job.started_at).total_seconds() * 1000.0
+ self._model.save(connection=self.connection)
+
+ def bootstrap(self):
+ """Bootstraps the worker.
+ Runs the basic tasks that should run when the worker actually starts working.
+ Used so that new workers can focus on the work loop implementation rather
+        than the full bootstrapping process.
+ """
+ self.worker_start()
+ logger.info(f"[Worker {self.name}/{self._pid}]: Worker {self.name} started with PID {os.getpid()}")
+ self._command_listener.start()
+ if self.with_scheduler:
+ self.scheduler = WorkerScheduler(self.queues, worker_name=self.name, connection=self.connection)
+ self.scheduler.start(burst=self.burst)
+ self._model.has_scheduler = True
+ self._model.save(connection=self.connection)
+ qnames = [queue.name for queue in self.queues]
+ logger.info(f"""[Worker {self.name}/{self._pid}]: Listening to queues {", ".join(qnames)}...""")
+
+ def _check_for_suspension(self, burst: bool) -> None:
+ """Check to see if workers have been suspended by `rq suspend`"""
+ before_state = None
+ notified = False
+ while self._model.shutdown_requested_date is not None and self._model.is_suspended:
+ if burst:
+ logger.info(f"[Worker {self.name}/{self._pid}]: Suspended in burst mode, exiting, "
+ f"Note: There could still be unfinished jobs on the queue")
+ raise StopRequested
+
+ if not notified:
+ logger.info(f"[Worker {self.name}/{self._pid}]: Worker suspended, trigger ResumeCommand")
+ before_state = self._model.state
+ self._model.set_field("state", WorkerStatus.SUSPENDED, connection=self.connection)
+ notified = True
+ time.sleep(1)
+
+ if before_state:
+ self._model.set_field("state", before_state, connection=self.connection)
+
+ def run_maintenance_tasks(self):
+ """Runs periodic maintenance tasks, these include:
+ 1. Check if scheduler should be started.
+ 2. Cleaning registries
+ """
+        if self.with_scheduler and self.scheduler is None:
+            self.scheduler = WorkerScheduler(self.queues, worker_name=self.name, connection=self.connection)
+            self.scheduler.start(burst=self.burst)
+            self._model.has_scheduler = True
+            self._model.save(connection=self.connection)
+ self.clean_registries()
+
+ def dequeue_job_and_maintain_ttl(
+ self, timeout: Optional[int], max_idle_time: Optional[int] = None
+    ) -> Tuple[Optional[JobModel], Optional[Queue]]:
+ """Dequeues a job while maintaining the TTL.
+ :param timeout: The timeout for the dequeue operation.
+ :param max_idle_time: The maximum idle time for the worker.
+ :returns: A tuple with the job and the queue.
+ """
+ qnames = ",".join([queue.name for queue in self.queues])
+
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+ self.procline(f"Listening on {qnames}")
+ logger.debug(f"[Worker {self.name}/{self._pid}]: listening on {qnames}...")
+ connection_wait_time = 1.0
+ idle_since = utcnow()
+ idle_time_left = max_idle_time
+ job, queue = None, None
+ while True:
+ try:
+ self._model.heartbeat(self.connection)
+
+ if self.should_run_maintenance_tasks():
+ self.run_maintenance_tasks()
+
+ if timeout is not None and idle_time_left is not None:
+ timeout = min(timeout, idle_time_left)
+
+ logger.debug(
+ f"[Worker {self.name}/{self._pid}]: Fetching jobs on queues {qnames} and timeout {timeout}"
+ )
+ job, queue = Queue.dequeue_any(self._ordered_queues, timeout, connection=self.connection)
+ if job is not None:
+ self.reorder_queues(reference_queue=queue)
+ logger.info(f"[Worker {self.name}/{self._pid}]: Popped job `{job.name}` from `{queue.name}`")
+ break
+ except DequeueTimeout:
+ if max_idle_time is not None:
+ idle_for = (utcnow() - idle_since).total_seconds()
+ idle_time_left = math.ceil(max_idle_time - idle_for)
+ if idle_time_left <= 0:
+ break
+ except ConnectionErrorTypes as conn_err:
+ logger.error(
+ f"[Worker {self.name}/{self._pid}]: Could not connect to Broker: {conn_err} Retrying in {connection_wait_time} seconds..."
+ )
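+                # Exponential backoff between reconnection attempts, capped at max_connection_wait_time.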
+ time.sleep(connection_wait_time)
+ connection_wait_time *= self.exponential_backoff_factor
+ connection_wait_time = min(connection_wait_time, self.max_connection_wait_time)
+
+ self._model.heartbeat(self.connection)
+ return job, queue
+
+ @property
+ def connection_timeout(self) -> int:
+ return SCHEDULER_CONFIG.DEFAULT_WORKER_TTL - 5
+
+ def procline(self, message):
+ """Changes the current procname for the process.
+
+ This can be used to make `ps -ef` output more readable.
+ """
+ setprocname(f"{self._model._key}: {message}")
+
+ def _validate_name_uniqueness(self):
+ """Validates that the worker name is unique."""
+ worker_model = WorkerModel.get(self.name, connection=self.connection)
+ if worker_model is not None and worker_model.death is None:
+ raise ValueError(f"There exists an active worker named {self.name!r} already")
+
+ def worker_start(self):
+ """Registers its own birth."""
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Registering birth")
+ now = utcnow()
+ self._model.birth = now
+ self._model.last_heartbeat = now
+ self._model.state = WorkerStatus.STARTED
+ self._model.save(self.connection)
+
+ def kill_job_execution_process(self, sig: signal.Signals = SIGKILL):
+ """Kill the job execution process but catch "No such process" error has the job execution process could already
+ be dead.
+
+ :param sig: Optional, Defaults to SIGKILL.
+ """
+ try:
+ os.killpg(os.getpgid(self._model.job_execution_process_pid), sig)
+ logger.info(
+ f"[Worker {self.name}/{self._pid}]: Killed job execution process pid {self._model.job_execution_process_pid}"
+ )
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ # "No such process" is fine with us
+ logger.debug("[Worker {self.name}/{self._pid}]: Job execution process already dead")
+ else:
+ raise
+
+ def wait_for_job_execution_process(self) -> Tuple[Optional[int], Optional[int], Optional[struct_rusage]]:
+ """Waits for the job execution process to complete.
+        Suppresses ChildProcessError, as the child process may already have been reaped.
+ """
+ pid = stat = rusage = None
+ with contextlib.suppress(ChildProcessError): # ChildProcessError: [Errno 10] No child processes
+ pid, stat, rusage = os.wait4(self._model.job_execution_process_pid, 0)
+ return pid, stat, rusage
+
+ def request_force_stop(self, signum: int, frame: Optional[FrameType]):
+ """Terminates the application (cold shutdown).
+
+ :param signum: Signal number
+ :param frame: Frame
+ :raises SystemExit: SystemExit
+ """
+ # When a worker is run through a worker pool, it may receive duplicate signals.
+ # One is sent by the pool when it calls `pool.stop_worker()` and another is sent by the OS
+ # when a user hits Ctrl+C. In this case, if we receive the second signal within 1 second, we ignore it.
+ shutdown_date = self._model.shutdown_requested_date
+ if shutdown_date is not None and (utcnow() - shutdown_date) < timedelta(seconds=1):
+ logger.debug(
+ f"[Worker {self.name}/{self._pid}]: Shutdown signal ignored, received twice in less than 1 second"
+ )
+ return
+
+ logger.warning(f"[Worker {self.name}/{self._pid}]: Cold shut down")
+
+ # Take down the job execution process with the worker
+ if self._model.job_execution_process_pid:
+ logger.debug(
+ f"[Worker {self.name}/{self._pid}]: Taking down job execution process {self._model.job_execution_process_pid} with me"
+ )
+ self.kill_job_execution_process()
+ self.wait_for_job_execution_process()
+ raise SystemExit()
+
+ def request_stop(self, signum: int, frame: Optional[FrameType]) -> None:
+ """Stops the current worker loop but waits for child processes to end gracefully (warm shutdown).
+ :param signum: Signal number
+ :param frame: Frame
+ """
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Got signal {signal_name(signum)}")
+ self._model.set_field("shutdown_requested_date", utcnow(), self.connection)
+
+ signal.signal(signal.SIGINT, self.request_force_stop)
+ signal.signal(signal.SIGTERM, self.request_force_stop)
+
+ logger.info(f"[Worker {self.name}/{self._pid}]: warm shut down requested")
+
+ self.stop_scheduler()
+        # If shutdown is requested in the middle of a job, wait until the job finishes before shutting down,
+        # and persist the shutdown request.
+ if self._model.state == WorkerStatus.BUSY:
+ self._model.set_field("shutdown_requested_date", utcnow(), connection=self.connection)
+
+ logger.debug(
+ f"[Worker {self.name}/{self._pid}]: Stopping after current job execution process is finished. "
+ f"Press Ctrl+C again for a cold shutdown."
+ )
+ else:
+ raise StopRequested()
+
+ def reorder_queues(self, reference_queue: Queue):
+ """Reorder the queues according to the strategy.
+ As this can be defined both in the `Worker` initialization or in the `work` method,
+ it doesn't take the strategy directly, but rather uses the private `_dequeue_strategy` attribute.
+
+        :param reference_queue: The queue to use as the reference point for reordering
+ """
+ if self._dequeue_strategy is None:
+ self._dequeue_strategy = DequeueStrategy.DEFAULT
+
+ if self._dequeue_strategy not in [e.value for e in DequeueStrategy]:
+ raise ValueError(
+ f"""[Worker {self.name}/{self._pid}]: Dequeue strategy should be one of {", ".join([e.value for e in DequeueStrategy])}"""
+ )
+ if self._dequeue_strategy == DequeueStrategy.DEFAULT:
+ return
+ if self._dequeue_strategy == DequeueStrategy.ROUND_ROBIN:
+ pos = self._ordered_queues.index(reference_queue)
+ self._ordered_queues = self._ordered_queues[pos + 1:] + self._ordered_queues[: pos + 1]
+ return
+ if self._dequeue_strategy == DequeueStrategy.RANDOM:
+ shuffle(self._ordered_queues)
+ return
+
+ def teardown(self) -> None:
+ if self._is_job_execution_process:
+ return
+ self.stop_scheduler()
+ self._command_listener.stop()
+ self._model.delete(self.connection)
+
+ def stop_scheduler(self):
+ """Stop the scheduler thread.
+ Will send the kill signal to the scheduler process,
+ if there's an OSError, just passes and `join()`'s the scheduler process, waiting for the process to finish.
+ """
+ if self.scheduler is None:
+ return
+ logger.info(f"[Worker {self.name}/{self._pid}]: Stopping scheduler thread {self.scheduler.pid}")
+ self.scheduler.request_stop_and_wait()
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Scheduler thread stopped")
+ self.scheduler = None
+
+ def refresh(self, update_queues: bool = False):
+ """Refreshes the worker data.
+ It will get the data from the datastore and update the Worker's attributes
+ """
+ self._model = WorkerModel.get(self.name, connection=self.connection)
+ if self._model is None:
+ msg = f"[Worker {self.name}/{self._pid}]: Worker {self.name} not found, quitting..."
+ logger.error(msg)
+ raise WorkerNotFound(msg)
+ if update_queues:
+ self.queues = [Queue(name=queue_name, connection=self.connection) for queue_name in self._model.queue_names]
+
+ def fork_job_execution_process(self, job: JobModel, queue: Queue) -> None:
+ """Spawns a job execution process to perform the actual work and passes it a job.
+ This is where the `fork()` actually happens.
+
+ :param job: The job to be executed
+ :param queue: The queue from which the job was dequeued
+ """
+ child_pid = os.fork()
+ os.environ["SCHEDULER_WORKER_NAME"] = self.name
+ os.environ["SCHEDULER_JOB_NAME"] = job.name
+ if child_pid == 0: # Child process/Job executor process to run the job
+ os.setsid()
+ self._model.job_execution_process_pid = os.getpid()
+ self._model.save(connection=self.connection)
+ self.execute_in_separate_process(job, queue)
+ os._exit(0) # just in case
+ else: # Parent worker process
+ logger.debug(
+ f"[Worker {self.name}/{self._pid}]: Forking job execution process, job_execution_process_pid={child_pid}")
+ self._model.job_execution_process_pid = child_pid
+ self._model.save(connection=self.connection)
+ self.procline(f"Forked {child_pid} at {time.time()}")
+
+ def get_heartbeat_ttl(self, job: JobModel) -> int:
+ """Get's the TTL for the next heartbeat.
+ :param job: The Job
+ :return: The heartbeat TTL
+ """
+ if job.timeout and job.timeout > 0:
+ remaining_execution_time = int(job.timeout - self._model.current_job_working_time)
+ return min(remaining_execution_time, self.job_monitoring_interval) + 60
+ else:
+ return self.job_monitoring_interval + 60
+
+ def monitor_job_execution_process(self, job: JobModel, queue: Queue) -> None:
+ """The worker will monitor the job execution process and make sure that it either executes successfully or the
+ status of the job is set to failed
+
+ :param job: The Job
+ :param queue: The Queue
+ """
+ retpid = ret_val = rusage = None
+ job.started_at = utcnow()
+ while True:
+ try:
+ with SCHEDULER_CONFIG.DEATH_PENALTY_CLASS(
+ self.job_monitoring_interval, JobExecutionMonitorTimeoutException
+ ):
+ retpid, ret_val, rusage = self.wait_for_job_execution_process()
+ break
+ except JobExecutionMonitorTimeoutException:
+ # job execution process has not exited yet and is still running. Send a heartbeat to keep the worker alive.
+ self._model.set_current_job_working_time((utcnow() - job.started_at).total_seconds(), self.connection)
+
+ # Kill the job from this side if something is really wrong (interpreter lock/etc).
+                if job.timeout is not None and job.timeout != -1 and self._model.current_job_working_time > (job.timeout + 60):
+ self._model.heartbeat(self.connection, self.job_monitoring_interval + 60)
+ self.kill_job_execution_process()
+ self.wait_for_job_execution_process()
+ break
+
+ self.maintain_heartbeats(job, queue)
+
+ except OSError as e:
+ # In case we encountered an OSError due to EINTR (which is
+ # caused by a SIGINT or SIGTERM signal during
+ # os.waitpid()), we simply ignore it and enter the next
+ # iteration of the loop, waiting for the child to end. In
+ # any other case, this is some other unexpected OS error,
+ # which we don't want to catch, so we re-raise those ones.
+ if e.errno != errno.EINTR:
+ raise
+ # Send a heartbeat to keep the worker alive.
+ self._model.heartbeat(self.connection)
+
+ self._model = WorkerModel.get(self.name, connection=self.connection)
+ self._model.current_job_working_time = 0
+ self._model.save(connection=self.connection)
+ if ret_val == os.EX_OK: # The process exited normally.
+ return
+
+ job_status = job.get_status(self.connection)
+ stopped_job_name = self._model.get_field("stopped_job_name", self.connection)
+
+ if job_status is None: # Job completed and its ttl has expired
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Job status is None, completed and expired?")
+ return
+ elif stopped_job_name == job.name: # job execution process killed deliberately
+ logger.warning(f"[Worker {self.name}/{self._pid}]: Job stopped by user, moving job to failed-jobs-registry")
+ if job.stopped_callback:
+ job.stopped_callback()
+ self.handle_job_failure(
+ job, queue=queue, exc_string="Job stopped by user, job execution process terminated."
+ )
+ elif job_status not in [JobStatus.FINISHED, JobStatus.FAILED]:
+ if not job.ended_at:
+ job.ended_at = utcnow()
+
+ # Unhandled failure: move the job to the failed queue
+ signal_msg = f" (signal {os.WTERMSIG(ret_val)})" if ret_val and os.WIFSIGNALED(ret_val) else ""
+ exc_string = f"job-execution-process terminated unexpectedly; waitpid returned {ret_val}{signal_msg}; "
+ logger.warning(
+ f"[Worker {self.name}/{self._pid}]: Moving job to {queue.name}/failed-job-registry ({exc_string})"
+ )
+
+ self.handle_job_failure(job, queue=queue, exc_string=exc_string)
+
+ def execute_job(self, job: JobModel, queue: Queue):
+ """Spawns a job execution process to perform the actual work and passes it a job.
+ The worker will wait for the job execution process and make sure it executes within the given timeout bounds, or
+ will end the job execution process with SIGALRM.
+ """
+ if self.fork_job_execution:
+ self._model.set_field("state", WorkerStatus.BUSY, connection=self.connection)
+ self.fork_job_execution_process(job, queue)
+ self.monitor_job_execution_process(job, queue)
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+ else:
+ self._model.set_field("state", WorkerStatus.BUSY, connection=self.connection)
+ self.perform_job(job, queue)
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+
+ def maintain_heartbeats(self, job: JobModel, queue: Queue):
+ """Updates worker and job's last heartbeat field."""
+ with self.connection.pipeline() as pipeline:
+ self._model.heartbeat(pipeline, self.job_monitoring_interval + 60)
+ ttl = self.get_heartbeat_ttl(job)
+
+ queue.active_job_registry.add(pipeline, self.name, current_timestamp() + ttl, update_existing_only=False)
+ results = pipeline.execute()
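+            # results[2] appears to correspond to the active_job_registry.add call; a value of 1 suggests
+            # the job is no longer tracked, in which case its key is deleted.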
+ if results[2] == 1:
+ job.delete(self.connection)
+
+ def execute_in_separate_process(self, job: JobModel, queue: Queue):
+ """This is the entry point of the newly spawned job execution process.
+        After fork()'ing, reseed the RNG to ensure the child generates random sequences different from the worker's.
+
+ os._exit() is the way to exit from child processes after a fork(), in contrast to the regular sys.exit()
+ """
+ random.seed()
+ self.setup_job_execution_process_signals()
+ self._is_job_execution_process = True
+ try:
+ self.perform_job(job, queue)
+ except: # noqa
+ os._exit(1)
+ os._exit(0)
+
+ def setup_job_execution_process_signals(self):
+ """Setup signal handing for the newly spawned job execution process
+
+ Always ignore Ctrl+C in the job execution process, as it might abort the currently running job.
+
+ The main worker catches the Ctrl+C and requests graceful shutdown after the current work is done.
+ When cold shutdown is requested, it kills the current job anyway.
+ """
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ def worker_before_execution(self, job: JobModel, connection: ConnectionType) -> None:
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Preparing for execution of job: `{job.name}`")
+ current_pid = os.getpid()
+ self._model.current_job_name = job.name
+ self._model.current_job_working_time = 0
+ self._model.job_execution_process_pid = current_pid
+ heartbeat_ttl = self.get_heartbeat_ttl(job)
+ self._model.heartbeat(self.connection, heartbeat_ttl)
+ self.procline(
+ f"[Worker {self.name}/{self._pid}]: Processing {job.func_name} from {job.queue_name} since {time.time()}"
+ )
+ self._model.save(connection=connection)
+
+ def handle_job_success(self, job: JobModel, return_value: Any, queue: Queue):
+ """Handles the successful execution of certain job.
+ It will remove the job from the `active_job_registry`, adding it to the `SuccessfulJobRegistry`,
+ and run a few maintenance tasks including:
+ - Resting the current job name
+ - Enqueue dependents
+ - Incrementing the job count and working time
+ - Handling of the job successful execution
+
+ Runs within a loop with the `watch` method so that protects interactions with dependents keys.
+
+ :param job: The job that was successful.
+ :param queue: The queue
+ """
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Handling successful execution of job {job.name}")
+ with self.connection.pipeline() as pipeline:
+ while True:
+ try:
+ queue.job_handle_success(
+ job, result=return_value, result_ttl=job.success_ttl, connection=pipeline
+ )
+ self._model.current_job_name = None
+ self._model.successful_job_count += 1
+ self._model.completed_jobs += 1
+                    self._model.total_working_time_ms += (job.ended_at - job.started_at).total_seconds() * 1000.0
+ self._model.save(connection=self.connection)
+
+ job.expire(job.success_ttl, connection=pipeline)
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Removing job {job.name} from active_job_registry")
+ queue.active_job_registry.delete(pipeline, job.name)
+ pipeline.execute()
+ logger.debug(
+ f"[Worker {self.name}/{self._pid}]: Finished handling successful execution of job {job.name}"
+ )
+ break
+ except WatchErrorTypes:
+ continue
+
+ def perform_job(self, job: JobModel, queue: Queue) -> bool:
+ """Performs the actual work of a job.
+ Called from the process executing the job (forked job execution process).
+
+ :param job: The job to perform
+ :param queue: The queue the job was dequeued from
+ :returns: True after finished.
+ """
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Performing {job.name} code.")
+
+ try:
+ with self.connection.pipeline() as pipeline:
+ self.worker_before_execution(job, connection=pipeline)
+ job.prepare_for_execution(self.name, queue.active_job_registry, connection=pipeline)
+ pipeline.execute()
+ timeout = job.timeout or SCHEDULER_CONFIG.DEFAULT_JOB_TIMEOUT
+ with SCHEDULER_CONFIG.DEATH_PENALTY_CLASS(timeout, JobTimeoutException, job_name=job.name):
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Performing job `{job.name}`...")
+ rv = perform_job(job, self.connection)
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Finished performing job `{job.name}`")
+
+ self.handle_job_success(job=job, return_value=rv, queue=queue)
+ except: # NOQA
+ logger.debug(f"[Worker {self.name}/{self._pid}]: Job {job.name} raised an exception.")
+ exc_info = sys.exc_info()
+ exc_string = "".join(traceback.format_exception(*exc_info))
+
+ self.handle_job_failure(job=job, exc_string=exc_string, queue=queue)
+ self.handle_exception(job, *exc_info)
+ return False
+
+ logger.info(f"[Worker {self.name}/{self._pid}]: queue:{queue.name}/job:{job.name} performed.")
+ logger.debug(f"[Worker {self.name}/{self._pid}]: job:{job.name} result: {str(rv)}")
+
+ return True
+
+ def handle_exception(self, job: JobModel, *exc_info):
+ """Walks the exception handler stack to delegate exception handling.
+ If the job cannot be deserialized, it will raise when func_name or
+ the other properties are accessed, which will stop exceptions from
+ being properly logged, so we guard against it here.
+ """
+ logger.debug(f"Handling exception caused while performing job:{job.name}.")
+ exc_string = "".join(traceback.format_exception(*exc_info))
+
+        extra = {
+            "func": job.func_name,
+            "arguments": job.args,
+            "kwargs": job.kwargs,
+            "queue": job.queue_name,
+            "job_name": job.name,
+        }
+ func_name = job.func_name
+
+ logger.error(
+ f"[Worker {self.name}/{self._pid}]: exception raised while executing ({func_name})\n{exc_string}",
+ extra=extra,
+ )
+
+
+class SimpleWorker(Worker):
+ def execute_job(self, job: JobModel, queue: Queue):
+ """Execute job in same thread/process, do not fork()"""
+ self._model.set_field("state", WorkerStatus.BUSY, connection=self.connection)
+ self.perform_job(job, queue)
+ self._model.set_field("state", WorkerStatus.IDLE, connection=self.connection)
+
+
+class RoundRobinWorker(Worker):
+ """Modified version of Worker that dequeues jobs from the queues using a round-robin strategy."""
+
+ def reorder_queues(self, reference_queue):
+ pos = self._ordered_queues.index(reference_queue)
+ self._ordered_queues = self._ordered_queues[pos + 1:] + self._ordered_queues[: pos + 1]
+
+
+class RandomWorker(Worker):
+ """Modified version of Worker that dequeues jobs from the queues using a random strategy."""
+
+ def reorder_queues(self, reference_queue):
+ shuffle(self._ordered_queues)
+
+
+def _get_ip_address_from_connection(connection: ConnectionType, client_name: str) -> str:
+ try:
+ connection.client_setname(client_name)
+ except ResponseErrorTypes:
+ warnings.warn("CLIENT SETNAME command not supported, setting ip_address to unknown", Warning)
+ return "unknown"
+    client_addresses = [client["addr"] for client in connection.client_list() if client["name"] == client_name]
+    if len(client_addresses) > 0:
+        return client_addresses[0]
+    else:
+        warnings.warn("Could not find the client address in CLIENT LIST output, setting ip_address to unknown", Warning)
+        return "unknown"
+
+
+def _ensure_list(obj: Any) -> List:
+ """When passed an iterable of objects, does nothing, otherwise, it returns a list with just that object in it.
+
+ :param obj: The object to ensure is a list
+ :return:
+ """
+ is_nonstring_iterable = isinstance(obj, Iterable) and not isinstance(obj, str)
+ return obj if is_nonstring_iterable else [obj]
diff --git a/testproject/testproject/settings.py b/testproject/testproject/settings.py
index e076068..39b0b96 100644
--- a/testproject/testproject/settings.py
+++ b/testproject/testproject/settings.py
@@ -1,7 +1,16 @@
import os
-import django
-from fakeredis import FakeConnection
+
+from scheduler.settings_types import QueueConfiguration
+
+BROKER_PORT = os.getenv("BROKER_PORT", "6379")
+SCHEDULER_QUEUES = {
+ "default": QueueConfiguration(URL=f"redis://localhost:{BROKER_PORT}/0"),
+ "low": QueueConfiguration(URL=f"redis://localhost:{BROKER_PORT}/0"),
+ "high": QueueConfiguration(URL=f"redis://localhost:{BROKER_PORT}/1"),
+ "medium": QueueConfiguration(URL=f"redis://localhost:{BROKER_PORT}/1"),
+ "another": QueueConfiguration(URL=f"redis://localhost:{BROKER_PORT}/1"),
+}
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -25,7 +34,7 @@
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
- "scheduler",
+ "scheduler.apps.SchedulerConfig",
]
MIDDLEWARE = [
@@ -111,25 +120,7 @@
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
-BROKER_PORT = os.getenv("BROKER_PORT", "6379")
STATIC_URL = "/static/"
-SCHEDULER_QUEUES = {
- "default": {
- "URL": f"redis://localhost:{BROKER_PORT}/0",
- },
- "low": {
- "URL": f"redis://localhost:{BROKER_PORT}/0",
- },
- "high": {
- "URL": f"redis://localhost:{BROKER_PORT}/1",
- },
- "medium": {
- "URL": f"redis://localhost:{BROKER_PORT}/1",
- },
- "another": {
- "URL": f"redis://localhost:{BROKER_PORT}/1",
- },
-}
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
LOGGING = {
@@ -145,16 +136,16 @@
},
},
"handlers": {
- "console": {"level": "INFO", "class": "logging.StreamHandler", "formatter": "simple"},
+ "console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "simple"},
},
"root": {
"handlers": ["console"],
- "level": "INFO",
+ "level": "DEBUG",
},
"loggers": {
"scheduler": {
"handlers": ["console"],
- "level": "INFO",
+ "level": "DEBUG",
},
},
}
|