@@ -287,6 +287,19 @@ static int btree_key_cache_create(struct btree_trans *trans,
287
287
return ret ;
288
288
}
289
289
290
/*
 * Emit the key_cache_fill tracepoint for @k at @ck_path->pos.
 *
 * Renders "<pos> <key>" into a temporary printbuf and hands it to the
 * tracepoint, then frees the buffer.  Only called when the tracepoint is
 * enabled (caller checks trace_key_cache_fill_enabled()), so the printbuf
 * allocation cost is off the common path.
 *
 * noinline_for_stack: keeps this helper's stack frame (the printbuf and
 * the text-formatting machinery it pulls in) out of the caller's frame —
 * presumably to keep btree_key_cache_fill()'s stack usage down; confirm
 * against the commit rationale.
 */
static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans,
						       struct btree_path *ck_path,
						       struct bkey_s_c k)
{
	struct printbuf buf = PRINTBUF;

	/* "<pos> <key value>" */
	bch2_bpos_to_text(&buf, ck_path->pos);
	prt_char(&buf, ' ');
	bch2_bkey_val_to_text(&buf, trans->c, k);
	trace_key_cache_fill(trans, buf.buf);
	/* printbuf may have heap-allocated; must be released */
	printbuf_exit(&buf);
}
302
+
290
303
static noinline int btree_key_cache_fill (struct btree_trans * trans ,
291
304
struct btree_path * ck_path ,
292
305
unsigned flags )
@@ -320,15 +333,8 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
320
333
if (ret )
321
334
goto err ;
322
335
323
- if (trace_key_cache_fill_enabled ()) {
324
- struct printbuf buf = PRINTBUF ;
325
-
326
- bch2_bpos_to_text (& buf , ck_path -> pos );
327
- prt_char (& buf , ' ' );
328
- bch2_bkey_val_to_text (& buf , trans -> c , k );
329
- trace_key_cache_fill (trans , buf .buf );
330
- printbuf_exit (& buf );
331
- }
336
+ if (trace_key_cache_fill_enabled ())
337
+ do_trace_key_cache_fill (trans , ck_path , k );
332
338
out :
333
339
/* We're not likely to need this iterator again: */
334
340
bch2_set_btree_iter_dontneed (& iter );
0 commit comments