@@ -412,10 +412,12 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
412
412
* @file: whether the corresponding folio is from the file lru.
413
413
* @workingset: where the workingset value unpacked from shadow should
414
414
* be stored.
415
+ * @flush: whether to flush cgroup rstat.
415
416
*
416
417
* Return: true if the shadow is for a recently evicted folio; false otherwise.
417
418
*/
418
- bool workingset_test_recent (void * shadow , bool file , bool * workingset )
419
+ bool workingset_test_recent (void * shadow , bool file , bool * workingset ,
420
+ bool flush )
419
421
{
420
422
struct mem_cgroup * eviction_memcg ;
421
423
struct lruvec * eviction_lruvec ;
@@ -467,10 +469,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
467
469
468
470
/*
469
471
* Flush stats (and potentially sleep) outside the RCU read section.
472
+ *
473
+ * Note that workingset_test_recent() itself might be called in RCU read
474
+ * section (e.g., in cachestat) - these callers need to skip flushing
475
+ * stats (via the flush argument).
476
+ *
470
477
* XXX: With per-memcg flushing and thresholding, is ratelimiting
471
478
* still needed here?
472
479
*/
473
- mem_cgroup_flush_stats_ratelimited (eviction_memcg );
480
+ if (flush )
481
+ mem_cgroup_flush_stats_ratelimited (eviction_memcg );
474
482
475
483
eviction_lruvec = mem_cgroup_lruvec (eviction_memcg , pgdat );
476
484
refault = atomic_long_read (& eviction_lruvec -> nonresident_age );
@@ -558,7 +566,7 @@ void workingset_refault(struct folio *folio, void *shadow)
558
566
559
567
mod_lruvec_state (lruvec , WORKINGSET_REFAULT_BASE + file , nr );
560
568
561
- if (!workingset_test_recent (shadow , file , & workingset ))
569
+ if (!workingset_test_recent (shadow , file , & workingset , true ))
562
570
return ;
563
571
564
572
folio_set_active (folio );
0 commit comments