2 files changed (+8 −4 lines changed)

@@ -305,11 +305,11 @@ def _snapshot_client(self) -> None:
         Only errors produced by a 2nd retry of the API call are considered for snapshotting since earlier errors may
         just be caused by a random spike in the number of requests and do not necessarily signify API overloading.
         """
-        # TODO: This is just a dummy placeholder. It can be implemented once `StorageClient` is ready.
-        # Attribute `self._client_rate_limit_error_retry_count` will be used here.
-        # https://github.com/apify/crawlee-python/issues/60
+        client = service_locator.get_storage_client()

-        error_count = 0
+        rate_limit_errors: dict[int, int] = client.get_rate_limit_errors()
+
+        error_count = rate_limit_errors.get(self._CLIENT_RATE_LIMIT_ERROR_RETRY_COUNT, 0)

         snapshot = ClientSnapshot(error_count=error_count, max_error_count=self._max_client_errors)

         snapshots = cast(list[Snapshot], self._client_snapshots)
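The added lines fetch the storage client through the service locator and read a per-retry histogram of rate limit errors from it; only the count recorded at the configured retry threshold feeds the client snapshot. Below is a minimal, self-contained sketch of that lookup, assuming the threshold constant resolves to 2 (matching the "2nd retry" wording in the docstring); the names used here are illustrative, not Crawlee's own.

CLIENT_RATE_LIMIT_ERROR_RETRY_COUNT = 2  # assumed value of _CLIENT_RATE_LIMIT_ERROR_RETRY_COUNT


def extract_error_count(rate_limit_errors: dict[int, int]) -> int:
    # Keys are retry-attempt indices, values are error counts at that attempt.
    # A missing key means "no errors recorded at that attempt", hence the default of 0.
    return rate_limit_errors.get(CLIENT_RATE_LIMIT_ERROR_RETRY_COUNT, 0)


print(extract_error_count({0: 10, 2: 3}))  # -> 3 (errors that survived the 2nd retry)
print(extract_error_count({}))             # -> 0 (client reported no rate limit errors)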
@@ -56,3 +56,7 @@ async def purge_on_start(self) -> None:
         It is primarily used to clean up residual data from previous runs to maintain a clean state.

         If the storage client does not support purging, leave it empty.
         """
+
+    def get_rate_limit_errors(self) -> dict[int, int]:
+        """Returns statistics about rate limit errors encountered by the HTTP client in the storage client."""
+        return {}
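The base class ships an empty default, so storage clients that do not track rate limiting still satisfy the interface. The following is a hedged sketch of how a concrete client might override it, counting rate-limit (HTTP 429) failures per retry attempt; the RateLimitedClient class and its _record_rate_limit_error() helper are hypothetical and not part of the Crawlee API.

from collections import defaultdict


class RateLimitedClient:
    """Hypothetical storage client that tracks rate limit errors per retry attempt."""

    def __init__(self) -> None:
        # Maps retry-attempt index -> number of rate-limit (HTTP 429) failures seen there.
        self._rate_limit_errors: defaultdict[int, int] = defaultdict(int)

    def _record_rate_limit_error(self, attempt: int) -> None:
        # Would be called by the HTTP layer whenever a request fails with a 429 status.
        self._rate_limit_errors[attempt] += 1

    def get_rate_limit_errors(self) -> dict[int, int]:
        """Return statistics about rate limit errors keyed by retry attempt."""
        # Return a plain copy so callers cannot mutate the internal counters.
        return dict(self._rate_limit_errors)


client = RateLimitedClient()
client._record_rate_limit_error(0)
client._record_rate_limit_error(2)
client._record_rate_limit_error(2)
print(client.get_rate_limit_errors())  # -> {0: 1, 2: 2}

Returning a copy via dict(...) keeps callers such as the snapshotter from mutating the client's internal counters.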