@@ -32,24 +32,24 @@ const char * const bch2_btree_node_flags[] = {
 
 void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
-	unsigned i, reserve = 16;
+	unsigned reserve = 16;
 
 	if (!c->btree_roots_known[0].b)
 		reserve += 8;
 
-	for (i = 0; i < btree_id_nr_alive(c); i++) {
+	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
 
 		if (r->b)
 			reserve += min_t(unsigned, 1, r->b->c.level) * 8;
 	}
 
-	c->btree_cache.reserve = reserve;
+	c->btree_cache.nr_reserve = reserve;
 }
 
-static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+static inline size_t btree_cache_can_free(struct btree_cache *bc)
 {
-	return max_t(int, 0, bc->used - bc->reserve);
+	return max_t(int, 0, bc->nr_used - bc->nr_reserve);
 }
 
 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
@@ -87,7 +87,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 #endif
 	b->aux_data = NULL;
 
-	bc->used--;
+	bc->nr_used--;
 
 	btree_node_to_freedlist(bc, b);
 }
@@ -167,7 +167,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 
 	bch2_btree_lock_init(&b->c, 0);
 
-	bc->used++;
+	bc->nr_used++;
 	list_add(&b->list, &bc->freeable);
 	return b;
 }
@@ -194,7 +194,7 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 	b->hash_val = 0;
 
 	if (b->c.btree_id < BTREE_ID_NR)
-		--bc->used_by_btree[b->c.btree_id];
+		--bc->nr_by_btree[b->c.btree_id];
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -205,7 +205,7 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 	int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
 						bch_btree_cache_params);
 	if (!ret && b->c.btree_id < BTREE_ID_NR)
-		bc->used_by_btree[b->c.btree_id]++;
+		bc->nr_by_btree[b->c.btree_id]++;
 	return ret;
 }
 
@@ -401,8 +401,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	unsigned long touched = 0;
 	unsigned i, flags;
 	unsigned long ret = SHRINK_STOP;
-	bool trigger_writes = atomic_read(&bc->dirty) + nr >=
-		bc->used * 3 / 4;
+	bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >=
+		bc->nr_used * 3 / 4;
 
 	if (bch2_btree_shrinker_disabled)
 		return SHRINK_STOP;
@@ -439,7 +439,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 			six_unlock_write(&b->c.lock);
 			six_unlock_intent(&b->c.lock);
 			freed++;
-			bc->freed++;
+			bc->nr_freed++;
 		}
 	}
 restart:
@@ -453,7 +453,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 		} else if (!btree_node_reclaim(c, b, true)) {
 			freed++;
 			btree_node_data_free(c, b);
-			bc->freed++;
+			bc->nr_freed++;
 
 			bch2_btree_node_hash_remove(bc, b);
 			six_unlock_write(&b->c.lock);
@@ -539,7 +539,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	}
 
 	BUG_ON(!bch2_journal_error(&c->journal) &&
-	       atomic_read(&c->btree_cache.dirty));
+	       atomic_long_read(&c->btree_cache.nr_dirty));
 
 	list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
 
@@ -572,7 +572,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
 	bch2_recalc_btree_reserve(c);
 
-	for (i = 0; i < bc->reserve; i++)
+	for (i = 0; i < bc->nr_reserve; i++)
 		if (!__bch2_btree_node_mem_alloc(c))
 			goto err;
 
@@ -739,7 +739,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 	}
 
 	mutex_lock(&bc->lock);
-	bc->used++;
+	bc->nr_used++;
 got_mem:
 	mutex_unlock(&bc->lock);
 
@@ -1353,11 +1353,11 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
 }
 
 static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
-				 const char *label, unsigned nr)
+				 const char *label, size_t nr)
 {
 	prt_printf(out, "%s\t", label);
 	prt_human_readable_u64(out, nr * c->opts.btree_node_size);
-	prt_printf(out, " (%u)\n", nr);
+	prt_printf(out, " (%zu)\n", nr);
 }
 
 static const char * const bch2_btree_cache_not_freed_reasons_strs[] = {
@@ -1374,16 +1374,16 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc
 	if (!out->nr_tabstops)
 		printbuf_tabstop_push(out, 32);
 
-	prt_btree_cache_line(out, c, "total:", bc->used);
-	prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
+	prt_btree_cache_line(out, c, "total:", bc->nr_used);
+	prt_btree_cache_line(out, c, "nr dirty:", atomic_long_read(&bc->nr_dirty));
 	prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
 	prt_newline(out);
 
-	for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
-		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+	for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
 
 	prt_newline(out);
-	prt_printf(out, "freed:\t%u\n", bc->freed);
+	prt_printf(out, "freed:\t%zu\n", bc->nr_freed);
 	prt_printf(out, "not freed:\n");
 
 	for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++)
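
The matching struct btree_cache changes live in the btree cache header and are not part of this excerpt. As a rough sketch only, using the new field names from the hunks above and types inferred from btree_cache_can_free() returning size_t, atomic_long_read(&bc->nr_dirty), and the %zu format specifiers, the renamed accounting fields would look something like:

/* Sketch, not taken from this diff: inferred declarations in struct btree_cache. */
struct btree_cache {
	/* ... hash table, lock, LRU lists ... */
	size_t		nr_used;			/* was "used" */
	size_t		nr_reserve;			/* was "reserve" */
	size_t		nr_by_btree[BTREE_ID_NR];	/* was "used_by_btree" */
	size_t		nr_freed;			/* was "freed" */
	atomic_long_t	nr_dirty;			/* was atomic_t "dirty" */
	/* ... */
};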