@@ -1,5 +1,7 @@
 from typing import cast
 
+import os
+import json
 import requests
 from .FastChecksumAddress import to_checksum_address
 from hexbytes import HexBytes
@@ -33,10 +35,13 @@ def get_text(url: str) -> str:
     res.raise_for_status()
     return res.text
 
+# Look-ahead cache: queries fetch up to 100k extra blocks in anticipation of future calls.
+future_logs_cache = {}
 def get_filter(
     chain_id: int,
     filter_params: FilterParams,
     partial_allowed=False,
+    disable_subsquid_look_ahead_cache: bool = os.getenv("DISABLE_SUBSQUID_LOOKAHEAD_CACHE", "false").lower() == "true",
     p_bar=None
 ) -> tuple[int, list[LogReceipt]]:
     endpoints = get_endpoints()
@@ -62,7 +67,7 @@ def get_filter(
         raise ValueError(f"Subsquid has only indexed till block {latest_block}")
 
     query = {
-        "toBlock": to_block,
+        "toBlock": to_block if disable_subsquid_look_ahead_cache else to_block + 100_000,
         "logs": [{}],
         "fields": {
             "log": {
@@ -95,14 +100,38 @@ def get_filter(
 
     logs: list[LogReceipt] = []
     while from_block <= to_block:
-        worker_url = get_text(f'{gateway_url}/{from_block}/worker')
+        cache_key_query = query.copy()
+        cache_key_query.pop("fromBlock", None)
+        cache_key_query.pop("toBlock")
+        cache_key = json.dumps(cache_key_query)
 
-        query['fromBlock'] = from_block
-        res = requests.post(worker_url, json=query)
-        res.raise_for_status()
-        blocks = res.json()
+        if cache_key in future_logs_cache and future_logs_cache[cache_key]["fromBlock"] == from_block:
+            blocks = future_logs_cache.pop(cache_key)["blocks"]
+        else:
+            worker_url = get_text(f'{gateway_url}/{from_block}/worker')
+
+            query['fromBlock'] = from_block
+            res = requests.post(worker_url, json=query)
+            res.raise_for_status()
+            blocks = res.json()
+
+        # Got more results than needed right now; cache the additional blocks for future queries.
+        if blocks[-1]['header']['number'] > to_block:
+            if not disable_subsquid_look_ahead_cache:
+                if len(future_logs_cache) > 10:
+                    # Limit future_logs_cache to 10 entries.
+                    future_logs_cache.pop(next(iter(future_logs_cache)))
+                future_blocks = [block for block in blocks if block['header']['number'] > to_block]
+                future_logs_cache[cache_key] = {
+                    "fromBlock": to_block + 1,
+                    "blocks": future_blocks,
+                }
+            blocks = [block for block in blocks if block['header']['number'] <= to_block]
+            last_processed_block = to_block
+        else:
+            last_processed_block = blocks[-1]['header']['number']
 
-        last_processed_block = blocks[-1]['header']['number']
+        assert last_processed_block <= to_block
         if p_bar is not None:
             p_bar.update(last_processed_block - from_block + 1)
         from_block = last_processed_block + 1
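
A minimal, self-contained sketch of the look-ahead caching pattern this diff introduces: a call over-fetches past its `to_block`, stashes the excess under a cache key derived from the query without its block range, and a later call that starts at `to_block + 1` is served from the cache without another worker round trip. `fetch_blocks` and `get_blocks` below are hypothetical stand-ins for the Subsquid worker request and for `get_filter`, and blocks are reduced to header numbers only; this is not the PR's code.

```python
import json

# Simplified stand-in for the module-level cache in the diff.
future_logs_cache = {}


def fetch_blocks(from_block: int, to_block: int) -> list[dict]:
    # Hypothetical stand-in for the Subsquid worker POST; one block per number.
    return [{"header": {"number": n}} for n in range(from_block, to_block + 1)]


def get_blocks(query: dict, from_block: int, to_block: int, look_ahead: int = 100_000) -> list[dict]:
    # Cache key is the query minus the block range, mirroring the diff.
    cache_key = json.dumps({k: v for k, v in query.items() if k not in ("fromBlock", "toBlock")})

    if cache_key in future_logs_cache and future_logs_cache[cache_key]["fromBlock"] == from_block:
        # Cache hit: reuse blocks fetched by an earlier, wider query.
        blocks = future_logs_cache.pop(cache_key)["blocks"]
    else:
        blocks = fetch_blocks(from_block, to_block + look_ahead)

    if blocks and blocks[-1]["header"]["number"] > to_block:
        # Stash everything past to_block for a future call starting at to_block + 1.
        future_logs_cache[cache_key] = {
            "fromBlock": to_block + 1,
            "blocks": [b for b in blocks if b["header"]["number"] > to_block],
        }
        blocks = [b for b in blocks if b["header"]["number"] <= to_block]
    return blocks


first = get_blocks({"logs": [{}]}, 1, 10)    # fetches 1..100_010, returns 1..10, caches the rest
second = get_blocks({"logs": [{}]}, 11, 20)  # served entirely from future_logs_cache
assert [b["header"]["number"] for b in second] == list(range(11, 21))
```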