1616#include <linux/prefetch.h>
1717#include <linux/sched/mm.h>
1818
/*
 * Bump the per-reason "not freed" statistic on the btree cache.
 *
 * Only counts when the reclaim attempt came from the shrinker
 * (shrinker_counter == true); other callers of __btree_node_reclaim()
 * pass false and leave the statistics untouched.
 *
 * Relies on `bc` (struct btree_cache *) and `shrinker_counter` (bool)
 * being in scope at the expansion site; `counter` is token-pasted onto
 * the not_freed_ field name (e.g. dirty -> bc->not_freed_dirty).
 *
 * NOTE: the macro name must be immediately followed by the parameter
 * list — `#define NAME (counter)` would define an object-like macro
 * whose expansion *starts* with "(counter)", which is not what callers
 * such as BTREE_CACHE_NOT_FREED_INCREMENT(dirty) expect.
 */
#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
do {						 \
	if (shrinker_counter)			 \
		bc->not_freed_##counter++;	 \
} while (0)
1925const char * const bch2_btree_node_flags [] = {
2026#define x (f ) #f ,
2127 BTREE_FLAGS ()
@@ -238,7 +244,7 @@ static inline struct btree *btree_cache_find(struct btree_cache *bc,
238244 * this version is for btree nodes that have already been freed (we're not
239245 * reaping a real btree node)
240246 */
241- static int __btree_node_reclaim (struct bch_fs * c , struct btree * b , bool flush )
247+ static int __btree_node_reclaim (struct bch_fs * c , struct btree * b , bool flush , bool shrinker_counter )
242248{
243249 struct btree_cache * bc = & c -> btree_cache ;
244250 int ret = 0 ;
@@ -260,38 +266,64 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
260266 if (b -> flags & ((1U << BTREE_NODE_dirty )|
261267 (1U << BTREE_NODE_read_in_flight )|
262268 (1U << BTREE_NODE_write_in_flight ))) {
263- if (!flush )
269+ if (!flush ) {
270+ if (btree_node_dirty (b ))
271+ BTREE_CACHE_NOT_FREED_INCREMENT (dirty );
272+ else if (btree_node_read_in_flight (b ))
273+ BTREE_CACHE_NOT_FREED_INCREMENT (read_in_flight );
274+ else if (btree_node_write_in_flight (b ))
275+ BTREE_CACHE_NOT_FREED_INCREMENT (write_in_flight );
264276 return - BCH_ERR_ENOMEM_btree_node_reclaim ;
277+ }
265278
266279 /* XXX: waiting on IO with btree cache lock held */
267280 bch2_btree_node_wait_on_read (b );
268281 bch2_btree_node_wait_on_write (b );
269282 }
270283
271- if (!six_trylock_intent (& b -> c .lock ))
284+ if (!six_trylock_intent (& b -> c .lock )) {
285+ BTREE_CACHE_NOT_FREED_INCREMENT (lock_intent );
272286 return - BCH_ERR_ENOMEM_btree_node_reclaim ;
287+ }
273288
274- if (!six_trylock_write (& b -> c .lock ))
289+ if (!six_trylock_write (& b -> c .lock )) {
290+ BTREE_CACHE_NOT_FREED_INCREMENT (lock_write );
275291 goto out_unlock_intent ;
292+ }
276293
277294 /* recheck under lock */
278295 if (b -> flags & ((1U << BTREE_NODE_read_in_flight )|
279296 (1U << BTREE_NODE_write_in_flight ))) {
280- if (!flush )
297+ if (!flush ) {
298+ if (btree_node_read_in_flight (b ))
299+ BTREE_CACHE_NOT_FREED_INCREMENT (read_in_flight );
300+ else if (btree_node_write_in_flight (b ))
301+ BTREE_CACHE_NOT_FREED_INCREMENT (write_in_flight );
281302 goto out_unlock ;
303+ }
282304 six_unlock_write (& b -> c .lock );
283305 six_unlock_intent (& b -> c .lock );
284306 goto wait_on_io ;
285307 }
286308
287- if (btree_node_noevict (b ) ||
288- btree_node_write_blocked (b ) ||
289- btree_node_will_make_reachable (b ))
309+ if (btree_node_noevict (b )) {
310+ BTREE_CACHE_NOT_FREED_INCREMENT (noevict );
311+ goto out_unlock ;
312+ }
313+ if (btree_node_write_blocked (b )) {
314+ BTREE_CACHE_NOT_FREED_INCREMENT (write_blocked );
290315 goto out_unlock ;
316+ }
317+ if (btree_node_will_make_reachable (b )) {
318+ BTREE_CACHE_NOT_FREED_INCREMENT (will_make_reachable );
319+ goto out_unlock ;
320+ }
291321
292322 if (btree_node_dirty (b )) {
293- if (!flush )
323+ if (!flush ) {
324+ BTREE_CACHE_NOT_FREED_INCREMENT (dirty );
294325 goto out_unlock ;
326+ }
295327 /*
296328 * Using the underscore version because we don't want to compact
297329 * bsets after the write, since this node is about to be evicted
@@ -321,14 +353,14 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
321353 goto out ;
322354}
323355
324- static int btree_node_reclaim (struct bch_fs * c , struct btree * b )
356+ static int btree_node_reclaim (struct bch_fs * c , struct btree * b , bool shrinker_counter )
325357{
326- return __btree_node_reclaim (c , b , false);
358+ return __btree_node_reclaim (c , b , false, shrinker_counter );
327359}
328360
329361static int btree_node_write_and_reclaim (struct bch_fs * c , struct btree * b )
330362{
331- return __btree_node_reclaim (c , b , true);
363+ return __btree_node_reclaim (c , b , true, false );
332364}
333365
334366static unsigned long bch2_btree_cache_scan (struct shrinker * shrink ,
@@ -376,11 +408,12 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
376408 if (touched >= nr )
377409 goto out ;
378410
379- if (!btree_node_reclaim (c , b )) {
411+ if (!btree_node_reclaim (c , b , true )) {
380412 btree_node_data_free (c , b );
381413 six_unlock_write (& b -> c .lock );
382414 six_unlock_intent (& b -> c .lock );
383415 freed ++ ;
416+ bc -> freed ++ ;
384417 }
385418 }
386419restart :
@@ -389,9 +422,11 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
389422
390423 if (btree_node_accessed (b )) {
391424 clear_btree_node_accessed (b );
392- } else if (!btree_node_reclaim (c , b )) {
425+ bc -> not_freed_access_bit ++ ;
426+ } else if (!btree_node_reclaim (c , b , true)) {
393427 freed ++ ;
394428 btree_node_data_free (c , b );
429+ bc -> freed ++ ;
395430
396431 bch2_btree_node_hash_remove (bc , b );
397432 six_unlock_write (& b -> c .lock );
@@ -599,7 +634,7 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
599634 struct btree * b ;
600635
601636 list_for_each_entry_reverse (b , & bc -> live , list )
602- if (!btree_node_reclaim (c , b ))
637+ if (!btree_node_reclaim (c , b , false ))
603638 return b ;
604639
605640 while (1 ) {
@@ -635,7 +670,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
635670 * disk node. Check the freed list before allocating a new one:
636671 */
637672 list_for_each_entry (b , freed , list )
638- if (!btree_node_reclaim (c , b )) {
673+ if (!btree_node_reclaim (c , b , false )) {
639674 list_del_init (& b -> list );
640675 goto got_node ;
641676 }
@@ -661,7 +696,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
661696 * the list. Check if there's any freed nodes there:
662697 */
663698 list_for_each_entry (b2 , & bc -> freeable , list )
664- if (!btree_node_reclaim (c , b2 )) {
699+ if (!btree_node_reclaim (c , b2 , false )) {
665700 swap (b -> data , b2 -> data );
666701 swap (b -> aux_data , b2 -> aux_data );
667702 btree_node_to_freedlist (bc , b2 );
@@ -1280,12 +1315,12 @@ static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
12801315 prt_printf (out , " (%u)\n" , nr );
12811316}
12821317
1283- void bch2_btree_cache_to_text (struct printbuf * out , const struct bch_fs * c )
1318+ void bch2_btree_cache_to_text (struct printbuf * out , const struct btree_cache * bc )
12841319{
1285- const struct btree_cache * bc = & c -> btree_cache ;
1320+ struct bch_fs * c = container_of ( bc , struct bch_fs , btree_cache ) ;
12861321
12871322 if (!out -> nr_tabstops )
1288- printbuf_tabstop_push (out , 24 );
1323+ printbuf_tabstop_push (out , 32 );
12891324
12901325 prt_btree_cache_line (out , c , "total:" , bc -> used );
12911326 prt_btree_cache_line (out , c , "nr dirty:" , atomic_read (& bc -> dirty ));
@@ -1294,4 +1329,17 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
12941329
12951330 for (unsigned i = 0 ; i < ARRAY_SIZE (bc -> used_by_btree ); i ++ )
12961331 prt_btree_cache_line (out , c , bch2_btree_id_str (i ), bc -> used_by_btree [i ]);
1332+
1333+ prt_newline (out );
1334+ prt_printf (out , "freed:\t%u\n" , bc -> freed );
1335+ prt_printf (out , "not freed:\n" );
1336+ prt_printf (out , " dirty\t%u\n" , bc -> not_freed_dirty );
1337+ prt_printf (out , " write in flight\t%u\n" , bc -> not_freed_write_in_flight );
1338+ prt_printf (out , " read in flight\t%u\n" , bc -> not_freed_read_in_flight );
1339+ prt_printf (out , " lock intent failed\t%u\n" , bc -> not_freed_lock_intent );
1340+ prt_printf (out , " lock write failed\t%u\n" , bc -> not_freed_lock_write );
1341+ prt_printf (out , " access bit\t%u\n" , bc -> not_freed_access_bit );
1342+ prt_printf (out , " no evict failed\t%u\n" , bc -> not_freed_noevict );
1343+ prt_printf (out , " write blocked\t%u\n" , bc -> not_freed_write_blocked );
1344+ prt_printf (out , " will make reachable\t%u\n" , bc -> not_freed_will_make_reachable );
12971345}
0 commit comments