@@ -49,7 +49,7 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
 
 static inline size_t btree_cache_can_free(struct btree_cache *bc)
 {
-	return max_t(int, 0, bc->nr_used - bc->nr_reserve);
+	return max_t(int, 0, bc->nr_live + bc->nr_freeable - bc->nr_reserve);
 }
 
 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
@@ -64,6 +64,8 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 {
 	struct btree_cache *bc = &c->btree_cache;
 
+	BUG_ON(btree_node_hashed(b));
+
 	/*
 	 * This should really be done in slub/vmalloc, but we're using the
 	 * kmalloc_large() path, so we're working around a slub bug by doing
@@ -87,7 +89,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 #endif
 	b->aux_data = NULL;
 
-	bc->nr_used--;
+	bc->nr_freeable--;
 
 	btree_node_to_freedlist(bc, b);
 }
@@ -167,7 +169,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 
 	bch2_btree_lock_init(&b->c, 0);
 
-	bc->nr_used++;
+	bc->nr_freeable++;
 	list_add(&b->list, &bc->freeable);
 	return b;
 }
@@ -186,6 +188,7 @@ void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
 
 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 {
+	lockdep_assert_held(&bc->lock);
 	int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
 
 	BUG_ON(ret);
@@ -195,6 +198,10 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 
 	if (b->c.btree_id < BTREE_ID_NR)
 		--bc->nr_by_btree[b->c.btree_id];
+
+	bc->nr_live--;
+	bc->nr_freeable++;
+	list_move(&b->list, &bc->freeable);
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -204,23 +211,25 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 
 	int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
 						bch_btree_cache_params);
-	if (!ret && b->c.btree_id < BTREE_ID_NR)
+	if (ret)
+		return ret;
+
+	if (b->c.btree_id < BTREE_ID_NR)
 		bc->nr_by_btree[b->c.btree_id]++;
-	return ret;
+	bc->nr_live++;
+	bc->nr_freeable--;
+	list_move_tail(&b->list, &bc->live);
+	return 0;
 }
 
 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 				unsigned level, enum btree_id id)
 {
-	int ret;
-
 	b->c.level	= level;
 	b->c.btree_id	= id;
 
 	mutex_lock(&bc->lock);
-	ret = __bch2_btree_node_hash_insert(bc, b);
-	if (!ret)
-		list_add_tail(&b->list, &bc->live);
+	int ret = __bch2_btree_node_hash_insert(bc, b);
 	mutex_unlock(&bc->lock);
 
 	return ret;
@@ -402,7 +411,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	unsigned i, flags;
 	unsigned long ret = SHRINK_STOP;
 	bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >=
-		bc->nr_used * 3 / 4;
+		(bc->nr_live + bc->nr_freeable) * 3 / 4;
 
 	if (bch2_btree_shrinker_disabled)
 		return SHRINK_STOP;
@@ -451,11 +460,12 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 			bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++;
 			--touched;;
 		} else if (!btree_node_reclaim(c, b, true)) {
+			bch2_btree_node_hash_remove(bc, b);
+
 			freed++;
 			btree_node_data_free(c, b);
 			bc->nr_freed++;
 
-			bch2_btree_node_hash_remove(bc, b);
 			six_unlock_write(&b->c.lock);
 			six_unlock_intent(&b->c.lock);
 
@@ -506,7 +516,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
 void bch2_fs_btree_cache_exit(struct bch_fs *c)
 {
 	struct btree_cache *bc = &c->btree_cache;
-	struct btree *b;
+	struct btree *b, *t;
 	unsigned i, flags;
 
 	shrinker_free(bc->shrink);
@@ -527,11 +537,10 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 			list_add(&r->b->list, &bc->live);
 	}
 
-	list_splice(&bc->freeable, &bc->live);
-
-	while (!list_empty(&bc->live)) {
-		b = list_first_entry(&bc->live, struct btree, list);
+	list_for_each_entry_safe(b, t, &bc->live, list)
+		bch2_btree_node_hash_remove(bc, b);
 
+	list_for_each_entry_safe(b, t, &bc->freeable, list) {
 		BUG_ON(btree_node_read_in_flight(b) ||
 		       btree_node_write_in_flight(b));
 
@@ -543,8 +552,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 
 	list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
 
-	while (!list_empty(&bc->freed_nonpcpu)) {
-		b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
+	list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) {
 		list_del(&b->list);
 		six_lock_exit(&b->c.lock);
 		kfree(b);
@@ -553,6 +561,11 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	mutex_unlock(&bc->lock);
 	memalloc_nofs_restore(flags);
 
+	for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+		BUG_ON(bc->nr_by_btree[i]);
+	BUG_ON(bc->nr_live);
+	BUG_ON(bc->nr_freeable);
+
 	if (bc->table_init_done)
 		rhashtable_destroy(&bc->table);
 }
@@ -739,7 +752,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
 	}
 
 	mutex_lock(&bc->lock);
-	bc->nr_used++;
+	bc->nr_freeable++;
 got_mem:
 	mutex_unlock(&bc->lock);
 
@@ -1280,8 +1293,8 @@ void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
 	BUG_ON(btree_node_dirty(b));
 
 	mutex_lock(&bc->lock);
-	btree_node_data_free(c, b);
 	bch2_btree_node_hash_remove(bc, b);
+	btree_node_data_free(c, b);
 	mutex_unlock(&bc->lock);
 out:
 	six_unlock_write(&b->c.lock);
@@ -1374,7 +1387,8 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
 	if (!out->nr_tabstops)
 		printbuf_tabstop_push(out, 32);
 
-	prt_btree_cache_line(out, c, "total:", bc->nr_used);
+	prt_btree_cache_line(out, c, "nr_live:", bc->nr_live);
+	prt_btree_cache_line(out, c, "nr_freeable:", bc->nr_freeable);
 	prt_btree_cache_line(out, c, "nr dirty:", atomic_long_read(&bc->nr_dirty));
 	prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
 	prt_newline(out);