55import math
66from collections .abc import Iterable
77from queue import Queue
8- from typing import Any , TypedDict
8+ from typing import TYPE_CHECKING , Any , TypedDict
99
1010from apify_shared .utils import filter_out_none_values_recursively , ignore_docs , parse_date_fields
1111from more_itertools import constrained_batches
1414from apify_client ._utils import catch_not_found_or_throw , pluck_data
1515from apify_client .clients .base import ResourceClient , ResourceClientAsync
1616
17+ if TYPE_CHECKING :
18+ from datetime import timedelta
19+
1720logger = logging .getLogger (__name__ )
1821
1922_RQ_MAX_REQUESTS_PER_BATCH = 25
@@ -279,6 +282,8 @@ def batch_add_requests(
279282 * ,
280283 forefront : bool = False ,
281284 max_parallel : int = 1 ,
285+ max_unprocessed_requests_retries : int | None = None ,
286+ min_delay_between_unprocessed_requests_retries : timedelta | None = None ,
282287 ) -> BatchAddRequestsResult :
283288 """Add requests to the request queue in batches.
284289
@@ -292,10 +297,17 @@ def batch_add_requests(
292297 max_parallel: Specifies the maximum number of parallel tasks for API calls. This is only applicable
293298 to the async client. For the sync client, this value must be set to 1, as parallel execution
294299 is not supported.
300+ max_unprocessed_requests_retries: Deprecated argument. Will be removed in the next major release.
301+ min_delay_between_unprocessed_requests_retries: Deprecated argument. Will be removed in the next major release.
295302
296303 Returns:
297304 Result containing lists of processed and unprocessed requests.
298305 """
306+ if max_unprocessed_requests_retries :
307+ logger .warning ('`max_unprocessed_requests_retries` is deprecated and not used anymore.' )
308+ if min_delay_between_unprocessed_requests_retries :
309+ logger .warning ('`min_delay_between_unprocessed_requests_retries` is deprecated and not used anymore.' )
310+
299311 if max_parallel != 1 :
300312 raise NotImplementedError ('max_parallel is only supported in async client' )
301313
@@ -678,6 +690,8 @@ async def batch_add_requests(
678690 * ,
679691 forefront : bool = False ,
680692 max_parallel : int = 5 ,
693+ max_unprocessed_requests_retries : int | None = None ,
694+ min_delay_between_unprocessed_requests_retries : timedelta | None = None ,
681695 ) -> BatchAddRequestsResult :
682696 """Add requests to the request queue in batches.
683697
@@ -691,10 +705,17 @@ async def batch_add_requests(
691705 max_parallel: Specifies the maximum number of parallel tasks for API calls. This is only applicable
692706 to the async client. For the sync client, this value must be set to 1, as parallel execution
693707 is not supported.
708+ max_unprocessed_requests_retries: Deprecated argument. Will be removed in the next major release.
709+ min_delay_between_unprocessed_requests_retries: Deprecated argument. Will be removed in the next major release.
694710
695711 Returns:
696712 Result containing lists of processed and unprocessed requests.
697713 """
714+ if max_unprocessed_requests_retries :
715+ logger .warning ('`max_unprocessed_requests_retries` is deprecated and not used anymore.' )
716+ if min_delay_between_unprocessed_requests_retries :
717+ logger .warning ('`min_delay_between_unprocessed_requests_retries` is deprecated and not used anymore.' )
718+
698719 tasks = set [asyncio .Task ]()
699720 queue : asyncio .Queue [Iterable [dict ]] = asyncio .Queue ()
700721 request_params = self ._params (clientKey = self .client_key , forefront = forefront )
0 commit comments