@@ -1,4 +1,4 @@
-use super::cache::{CachedResponse, QueryCache};
+use super::cache::QueryCache;
 use graph::prelude::CheapClone;
 use graphql_parser::query as q;
 use graphql_parser::schema as s;
@@ -9,7 +9,6 @@ use stable_hash::prelude::*;
 use stable_hash::utils::stable_hash;
 use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
 use std::iter;
-use std::ops::Deref;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::RwLock;
 use std::time::Instant;
@@ -33,7 +32,7 @@ struct CacheByBlock {
     block: EthereumBlockPointer,
     max_weight: usize,
     weight: usize,
-    cache: HashMap<QueryHash, CachedResponse<QueryResponse>>,
+    cache: HashMap<QueryHash, QueryResponse>,
 }
 
 impl CacheByBlock {
@@ -47,14 +46,13 @@ impl CacheByBlock {
     }
 
     /// Returns `true` if the insert was successful or `false` if the cache was full.
-    fn insert(&mut self, key: QueryHash, value: &CachedResponse<QueryResponse>) -> bool {
+    fn insert(&mut self, key: QueryHash, value: QueryResponse) -> bool {
         // Unwrap: We never try to insert errors into this cache.
-        let weight = value.deref().as_ref().ok().unwrap().weight();
-
+        let weight = value.as_ref().unwrap().weight();
         let fits_in_cache = self.weight + weight <= self.max_weight;
         if fits_in_cache {
             self.weight += weight;
-            self.cache.insert(key, value.cheap_clone());
+            self.cache.insert(key, value);
         }
         fits_in_cache
     }
@@ -100,21 +98,6 @@ lazy_static! {
     static ref QUERY_HERD_CACHE: QueryCache<QueryResponse> = QueryCache::new();
 }
 
-pub enum MaybeCached<T> {
-    NotCached(T),
-    Cached(CachedResponse<T>),
-}
-
-impl<T: Clone> MaybeCached<T> {
-    // Note that this drops any handle to the cache that may exist.
-    pub fn to_inner(self) -> T {
-        match self {
-            MaybeCached::NotCached(t) => t,
-            MaybeCached::Cached(t) => t.deref().clone(),
-        }
-    }
-}
-
 struct HashableQuery<'a> {
     query_schema_id: &'a SubgraphDeploymentId,
     query_variables: &'a HashMap<q::Name, q::Value>,
@@ -317,7 +300,7 @@ pub fn execute_root_selection_set(
     selection_set: &q::SelectionSet,
     root_type: &s::ObjectType,
     block_ptr: Option<EthereumBlockPointer>,
-) -> MaybeCached<QueryResponse> {
+) -> QueryResponse {
     // Cache the cache key to not have to calculate it twice - once for lookup
     // and once for insert.
     let mut key: Option<QueryHash> = None;
@@ -337,7 +320,7 @@ pub fn execute_root_selection_set(
         // Iterate from the most recent block looking for a block that matches.
         if let Some(cache_by_block) = cache.iter().find(|c| c.block == block_ptr) {
             if let Some(response) = cache_by_block.cache.get(&cache_key) {
-                return MaybeCached::Cached(response.cheap_clone());
+                return response.clone();
             }
         }
 
@@ -347,51 +330,47 @@
     }
 
     let result = if let Some(key) = key {
-        let cached = QUERY_HERD_CACHE.cached_query(key, || {
+        QUERY_HERD_CACHE.cached_query(key, || {
             execute_root_selection_set_uncached(ctx, selection_set, root_type)
-        });
-        MaybeCached::Cached(cached)
+        })
     } else {
-        let not_cached = execute_root_selection_set_uncached(ctx, selection_set, root_type);
-        MaybeCached::NotCached(not_cached)
+        execute_root_selection_set_uncached(ctx, selection_set, root_type)
     };
 
     // Check if this query should be cached.
-    if let (MaybeCached::Cached(cached), Some(key), Some(block_ptr)) = (&result, key, block_ptr) {
-        // Share errors from the herd cache, but don't store them in generational cache.
-        // In particular, there is a problem where asking for a block pointer beyond the chain
-        // head can cause the legitimate cache to be thrown out.
-        if cached.is_ok() {
-            let mut cache = QUERY_CACHE.write().unwrap();
-
-            // If there is already a cache by the block of this query, just add it there.
-            if let Some(cache_by_block) = cache.iter_mut().find(|c| c.block == block_ptr) {
-                let cache_insert = cache_by_block.insert(key, cached);
-                ctx.cache_insert.store(cache_insert, Ordering::SeqCst);
-            } else if *QUERY_CACHE_BLOCKS > 0 {
-                // We're creating a new `CacheByBlock` if:
-                // - There are none yet, this is the first query being cached, or
-                // - `block_ptr` is of higher or equal number than the most recent block in the cache.
-                // Otherwise this is a historical query which will not be cached.
-                let should_insert = match cache.iter().next() {
-                    None => true,
-                    Some(highest) if highest.block.number <= block_ptr.number => true,
-                    Some(_) => false,
-                };
-
-                if should_insert {
-                    if cache.len() == *QUERY_CACHE_BLOCKS {
-                        // At capacity, so pop the oldest block.
-                        cache.pop_back();
-                    }
+    // Share errors from the herd cache, but don't store them in generational cache.
+    // In particular, there is a problem where asking for a block pointer beyond the chain
+    // head can cause the legitimate cache to be thrown out.
+    if let (Ok(_), Some(key), Some(block_ptr)) = (&result, key, block_ptr) {
+        let mut cache = QUERY_CACHE.write().unwrap();
+
+        // If there is already a cache by the block of this query, just add it there.
+        if let Some(cache_by_block) = cache.iter_mut().find(|c| c.block == block_ptr) {
+            let cache_insert = cache_by_block.insert(key, result.clone());
+            ctx.cache_insert.store(cache_insert, Ordering::SeqCst);
+        } else if *QUERY_CACHE_BLOCKS > 0 {
+            // We're creating a new `CacheByBlock` if:
+            // - There are none yet, this is the first query being cached, or
+            // - `block_ptr` is of higher or equal number than the most recent block in the cache.
+            // Otherwise this is a historical query which will not be cached.
+            let should_insert = match cache.iter().next() {
+                None => true,
+                Some(highest) if highest.block.number <= block_ptr.number => true,
+                Some(_) => false,
+            };
 
-                    // Create a new cache by block, insert this entry, and add it to the QUERY_CACHE.
-                    let max_weight = *QUERY_CACHE_MAX_MEM / *QUERY_CACHE_BLOCKS;
-                    let mut cache_by_block = CacheByBlock::new(block_ptr, max_weight);
-                    let cache_insert = cache_by_block.insert(key, cached);
-                    ctx.cache_insert.store(cache_insert, Ordering::SeqCst);
-                    cache.push_front(cache_by_block);
+            if should_insert {
+                if cache.len() == *QUERY_CACHE_BLOCKS {
+                    // At capacity, so pop the oldest block.
+                    cache.pop_back();
                 }
+
+                // Create a new cache by block, insert this entry, and add it to the QUERY_CACHE.
+                let max_weight = *QUERY_CACHE_MAX_MEM / *QUERY_CACHE_BLOCKS;
+                let mut cache_by_block = CacheByBlock::new(block_ptr, max_weight);
+                let cache_insert = cache_by_block.insert(key, result.clone());
+                ctx.cache_insert.store(cache_insert, Ordering::SeqCst);
+                cache.push_front(cache_by_block);
             }
         }
     }
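For context on the `QUERY_HERD_CACHE.cached_query(key, || ...)` call in the diff: a herd cache deduplicates concurrent identical queries so that only one caller executes the query while the others wait for, and then clone, the same result. The sketch below is only an illustration of that pattern under stated assumptions; it is not the crate's actual `QueryCache`. The `HerdCache` type, the `u64` key standing in for `QueryHash`, and the `Condvar`-based waiting are all made up for the example, and it merely mirrors the shape of this change, where callers now receive an owned `QueryResponse` clone rather than a `CachedResponse` handle.

```rust
use std::collections::HashMap;
use std::sync::{Arc, Condvar, Mutex};

/// Hypothetical stand-in for the crate's `QueryCache`: a "herd" cache that
/// lets the first caller for a given key run the query while concurrent
/// callers with the same key wait and then clone the finished result.
struct HerdCache<R: Clone> {
    // Results (finished or in flight), keyed by a query hash.
    entries: Mutex<HashMap<u64, Arc<(Mutex<Option<R>>, Condvar)>>>,
}

impl<R: Clone> HerdCache<R> {
    fn new() -> Self {
        Self {
            entries: Mutex::new(HashMap::new()),
        }
    }

    /// Returns an owned `R`, mirroring how the diff drops `CachedResponse<T>`
    /// in favour of plain cloned values. A real implementation would also
    /// evict the entry once every waiter has taken its copy; that is
    /// simplified away here.
    fn cached_query<F: FnOnce() -> R>(&self, key: u64, f: F) -> R {
        let (entry, is_first) = {
            let mut entries = self.entries.lock().unwrap();
            match entries.get(&key) {
                Some(entry) => (entry.clone(), false),
                None => {
                    let entry = Arc::new((Mutex::new(None), Condvar::new()));
                    entries.insert(key, entry.clone());
                    (entry, true)
                }
            }
        };

        if is_first {
            // Only this caller executes the query; everyone else waits on it.
            let result = f();
            *entry.0.lock().unwrap() = Some(result.clone());
            entry.1.notify_all();
            result
        } else {
            // Block until the leader stores the result, then clone it.
            let mut slot = entry.0.lock().unwrap();
            while slot.is_none() {
                slot = entry.1.wait(slot).unwrap();
            }
            slot.as_ref().unwrap().clone()
        }
    }
}

fn main() {
    let cache = HerdCache::new();
    // Both calls use the same key; only the first closure actually runs.
    let a: String = cache.cached_query(42, || "expensive result".to_string());
    let b: String = cache.cached_query(42, || unreachable!());
    assert_eq!(a, b);
}
```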