import asyncio
import logging
import math
+ from dataclasses import dataclass
from datetime import timedelta
from typing import Any, TypedDict


class BatchAddRequestsResult(TypedDict):
-     """Result of the batch add requests operation."""
+     """Result of the batch add requests operation.
+
+     Args:
+         processed_requests: List of requests that were added.
+         unprocessed_requests: List of requests that failed to be added.
+     """

    processed_requests: list[dict]
    unprocessed_requests: list[dict]


+ @dataclass
+ class AddRequestsBatch:
+     """Batch of requests to add to the request queue.
+
+     Args:
+         requests: List of requests to be added to the request queue.
+         num_of_retries: Number of retries for the batch.
+     """
+
+     requests: list[dict]
+     num_of_retries: int = 0
+
+
class RequestQueueClient(ResourceClient):
    """Sub-client for manipulating a single request queue."""
@@ -559,15 +578,13 @@ async def delete_request_lock(

    async def _batch_add_requests_worker(
        self,
-         queue: asyncio.Queue,
+         queue: asyncio.Queue[AddRequestsBatch],
        request_params: dict,
        max_unprocessed_requests_retries: int,
        min_delay_between_unprocessed_requests_retries: timedelta,
    ) -> BatchAddRequestsResult:
-         processed_requests = []
-         unprocessed_requests = []
-
-         # TODO: add retry logic
+         processed_requests = list[dict]()
+         unprocessed_requests = list[dict]()

        try:
            while True:
@@ -577,13 +594,23 @@ async def _batch_add_requests_worker(
                    url=self._url('requests/batch'),
                    method='POST',
                    params=request_params,
-                     json=batch,
+                     json=batch.requests,
                )

                response_parsed = parse_date_fields(pluck_data(response.json()))

+                 # If the request was successful, add it to the processed requests.
                if 200 <= response.status_code <= 299:
                    processed_requests.append(response_parsed)
+
+                 # If the request was not successful and the number of retries is less than the maximum,
+                 # retry the request.
+                 elif batch.num_of_retries < max_unprocessed_requests_retries:
+                     batch.num_of_retries += 1
+                     await asyncio.sleep(min_delay_between_unprocessed_requests_retries.total_seconds())
+                     await queue.put(batch)
+
+                 # Otherwise, add the request to the unprocessed requests.
                else:
                    unprocessed_requests.append(response_parsed)
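A self-contained sketch of the retry flow this hunk introduces, with the API call replaced by a stubbed submit_batch coroutine; the stub, its failure behaviour, and the constant values are assumptions for illustration only:

import asyncio
from dataclasses import dataclass


@dataclass
class AddRequestsBatch:
    requests: list[dict]
    num_of_retries: int = 0


MAX_UNPROCESSED_RETRIES = 3   # stand-in for max_unprocessed_requests_retries
RETRY_DELAY_SECONDS = 0.01    # stand-in for min_delay_between_unprocessed_requests_retries


async def submit_batch(batch: AddRequestsBatch) -> bool:
    # Stub: pretend the API rejects the batch until its second retry.
    return batch.num_of_retries >= 2


async def worker(queue: asyncio.Queue[AddRequestsBatch], processed: list, unprocessed: list) -> None:
    while True:
        batch = await queue.get()
        try:
            if await submit_batch(batch):
                processed.append(batch.requests)
            elif batch.num_of_retries < MAX_UNPROCESSED_RETRIES:
                # A failed batch goes back onto the queue with an incremented counter.
                batch.num_of_retries += 1
                await asyncio.sleep(RETRY_DELAY_SECONDS)
                await queue.put(batch)
            else:
                unprocessed.append(batch.requests)
        finally:
            queue.task_done()


async def main() -> None:
    queue: asyncio.Queue[AddRequestsBatch] = asyncio.Queue()
    await queue.put(AddRequestsBatch([{'url': 'https://example.com'}]))
    processed: list = []
    unprocessed: list = []
    consumer = asyncio.create_task(worker(queue, processed, unprocessed))
    await queue.join()  # returns once every enqueued batch has been processed or given up on
    consumer.cancel()
    print(processed, unprocessed)


asyncio.run(main())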
@@ -625,10 +652,11 @@ async def batch_add_requests(
            Result of the operation with processed and unprocessed requests.
        """
        payload_size_limit_bytes = _MAX_PAYLOAD_SIZE_BYTES - math.ceil(_MAX_PAYLOAD_SIZE_BYTES * _SAFETY_BUFFER_PERCENT)
+         # TODO: payload size limit bytes

        request_params = self._params(clientKey=self.client_key, forefront=forefront)
        tasks = set[asyncio.Task]()
-         queue: asyncio.Queue[list[dict]] = asyncio.Queue()
+         queue: asyncio.Queue[AddRequestsBatch] = asyncio.Queue()

        # Get the number of request batches.
        number_of_batches = math.ceil(len(requests) / _RQ_MAX_REQUESTS_PER_BATCH)
@@ -637,7 +665,7 @@ async def batch_add_requests(
        for i in range(number_of_batches):
            start = i * _RQ_MAX_REQUESTS_PER_BATCH
            end = (i + 1) * _RQ_MAX_REQUESTS_PER_BATCH
-             batch = requests[start:end]
+             batch = AddRequestsBatch(requests[start:end])
            await queue.put(batch)

        # Start the worker tasks.
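A worked example of the batching arithmetic used above, assuming a hypothetical batch size of 25; the real value of _RQ_MAX_REQUESTS_PER_BATCH in the client may differ:

import math

RQ_MAX_REQUESTS_PER_BATCH = 25  # assumed value, for illustration only

requests = [{'url': f'https://example.com/{i}'} for i in range(103)]
number_of_batches = math.ceil(len(requests) / RQ_MAX_REQUESTS_PER_BATCH)  # ceil(103 / 25) == 5

# Each slice would then be wrapped in AddRequestsBatch(...) before being put onto the queue.
batches = [
    requests[i * RQ_MAX_REQUESTS_PER_BATCH:(i + 1) * RQ_MAX_REQUESTS_PER_BATCH]
    for i in range(number_of_batches)
]
print([len(batch) for batch in batches])  # [25, 25, 25, 25, 3]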