
Commit 8cd47ba

graphql: Log cache status
1 parent b984e04 commit 8cd47ba

File tree

5 files changed: 67 additions & 36 deletions


Cargo.lock

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default.

graphql/Cargo.toml

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@ version = "0.18.0"
 edition = "2018"

 [dependencies]
+crossbeam = "0.7"
 futures01 = { package="futures", version="0.1.29" }
 graph = { path = "../graph" }
 graphql-parser = "0.2.3"

graphql/src/execution/execution.rs

Lines changed: 60 additions & 27 deletions
@@ -9,9 +9,10 @@ use stable_hash::prelude::*;
 use stable_hash::utils::stable_hash;
 use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
 use std::iter;
-use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::RwLock;
 use std::time::Instant;
+use std::fmt;
+use crossbeam::atomic::AtomicCell;

 use graph::prelude::*;

@@ -170,6 +171,39 @@ fn cache_key(
     stable_hash::<SetHasher, _>(&query)
 }

+/// Used for checking if a response hit the cache.
+#[derive(Copy, Clone)]
+pub(crate) enum CacheStatus {
+    /// Hit is a hit in the generational cache.
+    Hit,
+
+    /// Shared is a hit in the herd cache.
+    Shared,
+
+    /// Insert is a miss that inserted in the generational cache.
+    Insert,
+
+    /// A miss is none of the above.
+    Miss,
+}
+
+impl Default for CacheStatus {
+    fn default() -> Self {
+        CacheStatus::Miss
+    }
+}
+
+impl fmt::Display for CacheStatus {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            CacheStatus::Hit => f.write_str("hit"),
+            CacheStatus::Shared => f.write_str("shared"),
+            CacheStatus::Insert => f.write_str("insert"),
+            CacheStatus::Miss => f.write_str("miss")
+        }
+    }
+}
+
 /// Contextual information passed around during query execution.
 pub struct ExecutionContext<R>
 where
@@ -190,15 +224,8 @@ where
     /// Max value for `first`.
     pub max_first: u32,

-    /// Will be `true` if the response was pulled from cache. The mechanism by
-    /// which this is set is actually to start at `true` and then be set to
-    /// `false` if the query is executed.
-    ///
-    /// Used for logging.
-    pub cached: AtomicBool,
-
-    /// Set to `true` if the response was inserted in the cache. Used for logging.
-    pub cache_insert: AtomicBool,
+    /// Records whether this was a cache hit, used for logging.
+    pub(crate) cache_status: AtomicCell<CacheStatus>,
 }

 // Helpers to look for types and fields on both the introspection and regular schemas.
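The two `AtomicBool` flags collapse into a single crossbeam `AtomicCell<CacheStatus>`. A minimal, self-contained sketch of how that field behaves is below; the enum is a local copy of the one added above, and only crossbeam's `AtomicCell` API (`new`/`load`/`store`, with `load` requiring `Copy`) is assumed:

```rust
use crossbeam::atomic::AtomicCell;

// Local copy of the enum from execution.rs, trimmed to what the sketch needs.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CacheStatus {
    Hit,
    Shared,
    Insert,
    Miss,
}

impl Default for CacheStatus {
    fn default() -> Self {
        CacheStatus::Miss
    }
}

fn main() {
    // A fresh ExecutionContext starts out as a miss.
    let cache_status: AtomicCell<CacheStatus> = AtomicCell::new(CacheStatus::default());
    assert_eq!(cache_status.load(), CacheStatus::Miss);

    // During execution the status can be overwritten through a shared reference,
    // which is why an atomic cell is used instead of a plain field.
    cache_status.store(CacheStatus::Hit);
    assert_eq!(cache_status.load(), CacheStatus::Hit);

    // CacheStatus is Copy and byte-sized, so on common platforms the cell can be
    // read and written without locking.
    println!("lock-free: {}", AtomicCell::<CacheStatus>::is_lock_free());
}
```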
@@ -235,8 +262,7 @@ where
             query: self.query.as_introspection_query(),
             deadline: self.deadline,
             max_first: std::u32::MAX,
-            cached: AtomicBool::new(true),
-            cache_insert: AtomicBool::new(false),
+            cache_status: AtomicCell::new(CacheStatus::Miss),
         }
     }
 }
@@ -246,8 +272,6 @@ pub fn execute_root_selection_set_uncached(
     selection_set: &q::SelectionSet,
     root_type: &s::ObjectType,
 ) -> QueryResponse {
-    ctx.cached.store(false, std::sync::atomic::Ordering::SeqCst);
-
     // Split the top-level fields into introspection fields and
     // regular data fields
     let mut data_set = q::SelectionSet {
@@ -321,7 +345,8 @@ pub fn execute_root_selection_set<R: Resolver>(
         // Iterate from the most recent block looking for a block that matches.
         if let Some(cache_by_block) = cache.iter().find(|c| c.block == block_ptr) {
             if let Some(response) = cache_by_block.cache.get(&cache_key) {
-                return response.clone();
+                ctx.cache_status.store(CacheStatus::Hit);
+                return response.cheap_clone();
             }
         }

@@ -330,33 +355,39 @@
         }
     }

-    let result = if let Some(key) = key {
-        QUERY_HERD_CACHE.cached_query(key, || {
-            Arc::new(QueryResult::from(execute_root_selection_set_uncached(
-                ctx,
-                selection_set,
-                root_type,
-            )))
-        })
-    } else {
+    let mut herd_hit = true;
+    let mut run_query = || {
+        herd_hit = false;
         Arc::new(QueryResult::from(execute_root_selection_set_uncached(
             ctx,
             selection_set,
             root_type,
         )))
     };
+    let result = if let Some(key) = key {
+        QUERY_HERD_CACHE.cached_query(key, run_query)
+    } else {
+        run_query()
+    };
+    if herd_hit {
+        ctx.cache_status.store(CacheStatus::Shared);
+    }

     // Check if this query should be cached.
     // Share errors from the herd cache, but don't store them in generational cache.
     // In particular, there is a problem where asking for a block pointer beyond the chain
     // head can cause the legitimate cache to be thrown out.
-    if let (false, Some(key), Some(block_ptr)) = (result.has_errors(), key, block_ptr) {
+    // It would be redundant to insert herd cache hits.
+    let no_cache = herd_hit || result.has_errors();
+    if let (false, Some(key), Some(block_ptr)) = (no_cache, key, block_ptr) {
         let mut cache = QUERY_CACHE.write().unwrap();

         // If there is already a cache by the block of this query, just add it there.
         if let Some(cache_by_block) = cache.iter_mut().find(|c| c.block == block_ptr) {
             let cache_insert = cache_by_block.insert(key, result.cheap_clone());
-            ctx.cache_insert.store(cache_insert, Ordering::SeqCst);
+            if cache_insert {
+                ctx.cache_status.store(CacheStatus::Insert);
+            }
         } else if *QUERY_CACHE_BLOCKS > 0 {
             // We're creating a new `CacheByBlock` if:
             // - There are none yet, this is the first query being cached, or
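The herd-cache refactor above folds the duplicated `Arc::new(...)` branches into a single `run_query` closure and uses a flag to tell whether the closure actually ran: `herd_hit` stays `true` only when the result was shared from an identical in-flight query. A small self-contained sketch of that pattern, where `cached_query` is a hypothetical stand-in for `QUERY_HERD_CACHE.cached_query` (not its real signature):

```rust
// Hypothetical stand-in for the herd cache: either reuse a result that another
// identical in-flight query already produced, or run the closure ourselves.
fn cached_query<T>(in_flight: Option<T>, run: impl FnOnce() -> T) -> T {
    match in_flight {
        Some(shared) => shared, // closure never runs: a herd ("shared") hit
        None => run(),          // this request does the work itself
    }
}

fn main() {
    let mut herd_hit = true;
    let run_query = || {
        // Only reached when the query is actually executed.
        herd_hit = false;
        "response".to_string()
    };

    let result = cached_query(Some("response".to_string()), run_query);
    assert_eq!(result, "response");

    // The closure was never invoked, so this would be recorded as CacheStatus::Shared.
    assert!(herd_hit);
}
```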
@@ -378,7 +409,9 @@ pub fn execute_root_selection_set<R: Resolver>(
             let max_weight = *QUERY_CACHE_MAX_MEM / *QUERY_CACHE_BLOCKS;
             let mut cache_by_block = CacheByBlock::new(block_ptr, max_weight);
             let cache_insert = cache_by_block.insert(key, result.cheap_clone());
-            ctx.cache_insert.store(cache_insert, Ordering::SeqCst);
+            if cache_insert {
+                ctx.cache_status.store(CacheStatus::Insert);
+            }
             cache.push_front(cache_by_block);
         }
     }

graphql/src/query/mod.rs

Lines changed: 3 additions & 4 deletions
@@ -2,7 +2,7 @@ use graph::prelude::{info, o, EthereumBlockPointer, Logger, QueryExecutionError,
 use graphql_parser::query as q;
 use std::collections::hash_map::DefaultHasher;
 use std::hash::{Hash, Hasher};
-use std::sync::{atomic::AtomicBool, Arc};
+use std::sync::Arc;
 use std::time::Instant;

 use graph::data::graphql::effort::LoadManager;
@@ -62,8 +62,7 @@ where
         query: query.clone(),
         deadline: options.deadline,
         max_first: options.max_first,
-        cached: AtomicBool::new(true),
-        cache_insert: AtomicBool::new(false),
+        cache_status: Default::default()
     };

     if !query.is_query() {
@@ -91,7 +90,7 @@ where
         "query" => &query.query_text,
         "variables" => &query.variables_text,
         "query_time_ms" => elapsed.as_millis(),
-        "cached" => ctx.cached.load(std::sync::atomic::Ordering::SeqCst),
+        "cached" => ctx.cache_status.load().to_string(),
         "block" => block_ptr.map(|b| b.number).unwrap_or(0),
         "complexity" => &query.complexity
     );
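With this change the `cached` log field goes from a `true`/`false` value to the `Display` rendering of `CacheStatus`, so log consumers now see one of four strings. A self-contained sketch of the values the field can take (the enum and `Display` impl mirror the ones added in execution.rs, copied here for illustration only):

```rust
use std::fmt;

// Mirrors the enum and Display impl from execution.rs.
#[derive(Copy, Clone)]
enum CacheStatus {
    Hit,
    Shared,
    Insert,
    Miss,
}

impl fmt::Display for CacheStatus {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            CacheStatus::Hit => "hit",
            CacheStatus::Shared => "shared",
            CacheStatus::Insert => "insert",
            CacheStatus::Miss => "miss",
        })
    }
}

fn main() {
    // `ctx.cache_status.load().to_string()` at the logging call site produces
    // exactly these strings for the "cached" key.
    for status in &[CacheStatus::Hit, CacheStatus::Shared, CacheStatus::Insert, CacheStatus::Miss] {
        println!("cached = {}", status);
    }
}
```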

graphql/src/subscription/mod.rs

Lines changed: 2 additions & 5 deletions
@@ -2,7 +2,6 @@ use graphql_parser::{query as q, schema as s};
 use std::collections::HashMap;
 use std::iter;
 use std::result::Result;
-use std::sync::{atomic::AtomicBool, Arc};
 use std::time::{Duration, Instant};

 use graph::prelude::*;
@@ -82,8 +81,7 @@ where
         query: query.clone(),
         deadline: None,
         max_first: options.max_first,
-        cached: AtomicBool::new(true),
-        cache_insert: AtomicBool::new(false),
+        cache_status: Default::default()
     };

     if !query.is_subscription() {
@@ -200,8 +198,7 @@ async fn execute_subscription_event(
         query,
         deadline: timeout.map(|t| Instant::now() + t),
         max_first,
-        cached: AtomicBool::new(true),
-        cache_insert: AtomicBool::new(false),
+        cache_status: Default::default()
     };

     // We have established that this exists earlier in the subscription execution
