1616)
1717from spinach .job import Job , JobStatus
1818from spinach .task import Task
19+ from spinach import signals
1920
2021
2122CONCURRENT_TASK_NAME = 'i_am_concurrent'
@@ -281,7 +282,7 @@ def test_enqueue_jobs_from_dead_broker(broker, broker_2):
281282 assert current == b'1'
282283
283284 # Mark broker as dead, should re-enqueue only the idempotent jobs.
284- assert broker_2 .enqueue_jobs_from_dead_broker (broker ._id ) == 2
285+ assert broker_2 .enqueue_jobs_from_dead_broker (broker ._id ) == ( 2 , [])
285286
286287 # Check that the current_concurrency was decremented for job_3.
287288 current = broker ._r .hget (
@@ -301,11 +302,36 @@ def test_enqueue_jobs_from_dead_broker(broker, broker_2):
301302
302303 # Check that a broker can be marked as dead multiple times
303304 # without duplicating jobs
304- assert broker_2 .enqueue_jobs_from_dead_broker (broker ._id ) == 0
305+ assert broker_2 .enqueue_jobs_from_dead_broker (broker ._id ) == (0 , [])
306+
307+
def test_enqueue_fails_jobs_from_dead_broker_if_max_retries_exceeded(
        broker, broker_2
):
    """Jobs of a dead broker that already exhausted their retries are not
    re-enqueued; they are returned in the failed-jobs list instead.
    """
    # exhausted_job allows max_retries=1 and has already been retried once,
    # so it cannot be requeued; retryable_job still has retries left.
    exhausted_job = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 1)
    exhausted_job.retries = 1
    retryable_job = Job(
        'foo_task', 'foo_queue', datetime.now(timezone.utc), 10
    )
    broker.enqueue_jobs([exhausted_job, retryable_job])

    # Move both jobs into the running state on the first broker.
    broker.get_jobs_from_queue('foo_queue', 100)

    # Simulate the first broker dying.
    requeued_count, failed_payloads = broker_2.enqueue_jobs_from_dead_broker(
        broker._id
    )

    # Only the retryable job was requeued; the exhausted one comes back in
    # the failed list, still carrying the RUNNING status it died with.
    assert requeued_count == 1
    exhausted_job.status = JobStatus.RUNNING
    deserialized = [
        Job.deserialize(payload.decode()) for payload in failed_payloads
    ]
    assert deserialized == [exhausted_job]
305329
306330
307331def test_detect_dead_broker (broker , broker_2 ):
308- broker_2 .enqueue_jobs_from_dead_broker = Mock (return_value = 10 )
332+ broker_2 .enqueue_jobs_from_dead_broker = Mock (
333+ return_value = (10 , [])
334+ )
309335
310336 # Register the first broker
311337 broker .move_future_jobs ()
@@ -321,8 +347,34 @@ def test_detect_dead_broker(broker, broker_2):
321347 )
322348
323349
def test_dead_jobs_exceeding_max_retries_are_marked_failed(broker, broker_2):
    """When a dead broker held a job with no retries left, move_future_jobs
    must emit the job_failed signal with the job marked FAILED.
    """
    dead_job = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 1)
    dead_job.retries = 1

    # Register the first broker so broker_2 can later consider it dead.
    broker.move_future_jobs()

    # Pretend the dead broker held exactly one job whose retries are spent.
    broker_2.enqueue_jobs_from_dead_broker = Mock(
        return_value=(0, [dead_job.serialize()])
    )

    # Have broker_2 treat 2 seconds of silence as death, then wait it out.
    broker_2.broker_dead_threshold_seconds = 2
    time.sleep(2.1)

    signal_called = False

    @signals.job_failed.connect
    def check_job(namespace, job, err, **kwargs):
        nonlocal signal_called
        signal_called = True
        assert job.status == JobStatus.FAILED

    assert broker_2.move_future_jobs() == 0
    assert signal_called is True
373+
324374def test_not_detect_deregistered_broker_as_dead (broker , broker_2 ):
325- broker_2 .enqueue_jobs_from_dead_broker = Mock (return_value = 10 )
375+ broker_2 .enqueue_jobs_from_dead_broker = Mock (
376+ return_value = (10 , [])
377+ )
326378
327379 # Register and de-register the first broker
328380 broker .move_future_jobs ()
0 commit comments