Commit c670509

Author: Kent Overstreet

bcachefs: Allocator prefers not to expand mi.btree_allocated bitmap

We now have a small bitmap in the member info section of the superblock for "regions that have btree nodes", so that if we ever have to scan for btree nodes in repair we don't have to scan the whole device(s).

This tweaks the allocator to prefer allocating from regions that are already marked in this bitmap.

Signed-off-by: Kent Overstreet <[email protected]>

1 parent 4057494 commit c670509
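
In outline: a btree allocation now starts with its bitmap preference set to BTREE_BITMAP_YES and a data allocation to BTREE_BITMAP_NO, buckets whose bitmap state doesn't match are skipped (counted in skipped_mi_btree_bitmap), and if nothing satisfies the preference the allocation is retried with BTREE_BITMAP_ANY. Below is a minimal sketch of that prefer-then-relax shape; try_alloc() and its behaviour are hypothetical stand-ins, not the bcachefs entry points.

	/*
	 * Sketch of the fallback pattern this commit adds, using hypothetical
	 * helpers.  The bitmap is a preference, not a hard constraint: if no
	 * bucket matches, the request is relaxed to BTREE_BITMAP_ANY.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum btree_bitmap_req {
		BTREE_BITMAP_NO,	/* prefer regions with no btree nodes (data) */
		BTREE_BITMAP_YES,	/* prefer regions already marked as btree */
		BTREE_BITMAP_ANY,	/* no preference */
	};

	/*
	 * Hypothetical allocator: pretend no marked region has free space, so a
	 * BTREE_BITMAP_YES request fails and must be relaxed.  Returns a bucket
	 * index, or -1 on failure.
	 */
	static long try_alloc(enum btree_bitmap_req req)
	{
		return req == BTREE_BITMAP_YES ? -1 : 1234;
	}

	static long alloc_bucket(bool for_btree)
	{
		enum btree_bitmap_req req = for_btree ? BTREE_BITMAP_YES : BTREE_BITMAP_NO;
		long b = try_alloc(req);

		/* Preference unsatisfiable: retry without the bitmap constraint. */
		if (b < 0)
			b = try_alloc(BTREE_BITMAP_ANY);
		return b;
	}

	int main(void)
	{
		printf("btree alloc -> bucket %ld\n", alloc_bucket(true));
		printf("data alloc  -> bucket %ld\n", alloc_bucket(false));
		return 0;
	}

In the patch itself the retry is the new "goto alloc" in bch2_bucket_alloc_trans(), and the per-device allocation cursor becomes a small array indexed by this preference so the three search positions don't overwrite each other.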

File tree: 3 files changed (+66, -11 lines)

fs/bcachefs/alloc_foreground.c

Lines changed: 58 additions & 10 deletions
@@ -71,7 +71,7 @@ void bch2_reset_alloc_cursors(struct bch_fs *c)
 {
 	rcu_read_lock();
 	for_each_member_device_rcu(c, ca, NULL)
-		ca->alloc_cursor = 0;
+		memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
 	rcu_read_unlock();
 }
 
@@ -389,7 +389,8 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 	struct bkey_s_c k, ck;
 	struct open_bucket *ob = NULL;
 	u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
-	u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
+	u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
 	u64 alloc_cursor = alloc_start;
 	int ret;
 
@@ -405,8 +406,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 again:
 	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
			   BTREE_ITER_slots, k, ret) {
-		struct bch_alloc_v4 a_convert;
-		const struct bch_alloc_v4 *a;
+		u64 bucket = k.k->p.offset;
 
 		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
 			break;
@@ -415,7 +415,24 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 		    is_superblock_bucket(ca, k.k->p.offset))
 			continue;
 
-		a = bch2_alloc_to_v4(k, &a_convert);
+		if (s->btree_bitmap != BTREE_BITMAP_ANY &&
+		    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
+					bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
+			if (s->btree_bitmap == BTREE_BITMAP_YES &&
+			    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
+				break;
+
+			bucket = sector_to_bucket(ca,
+					round_up(bucket_to_sector(ca, bucket) + 1,
+						 1ULL << ca->mi.btree_bitmap_shift));
+			bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
+			s->buckets_seen++;
+			s->skipped_mi_btree_bitmap++;
+			continue;
+		}
+
+		struct bch_alloc_v4 a_convert;
+		const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
 		if (a->data_type != BCH_DATA_free)
 			continue;
 
@@ -441,7 +458,6 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 	bch2_trans_iter_exit(trans, &iter);
 
 	alloc_cursor = iter.pos.offset;
-	ca->alloc_cursor = alloc_cursor;
 
 	if (!ob && ret)
 		ob = ERR_PTR(ret);
@@ -451,6 +467,8 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 		goto again;
 	}
 
+	*dev_alloc_cursor = alloc_cursor;
+
 	return ob;
 }
 
@@ -463,7 +481,8 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct open_bucket *ob = NULL;
-	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
+	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
+	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
 	u64 alloc_cursor = alloc_start;
 	int ret;
 
@@ -485,6 +504,26 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 
 		s->buckets_seen++;
 
+		u64 bucket = alloc_cursor & ~(~0ULL << 56);
+		if (s->btree_bitmap != BTREE_BITMAP_ANY &&
+		    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
+					bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
+			if (s->btree_bitmap == BTREE_BITMAP_YES &&
+			    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
+				goto fail;
+
+			bucket = sector_to_bucket(ca,
+					round_up(bucket_to_sector(ca, bucket) + 1,
+						 1ULL << ca->mi.btree_bitmap_shift));
+			u64 genbits = alloc_cursor >> 56;
+			alloc_cursor = bucket | (genbits << 56);
+
+			if (alloc_cursor > k.k->p.offset)
+				bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
+			s->skipped_mi_btree_bitmap++;
+			continue;
+		}
+
 		ob = try_alloc_bucket(trans, ca, watermark,
 				      alloc_cursor, s, k, cl);
 		if (ob) {
@@ -496,10 +535,9 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 		if (ob || ret)
 			break;
 	}
+fail:
 	bch2_trans_iter_exit(trans, &iter);
 
-	ca->alloc_cursor = alloc_cursor;
-
 	if (!ob && ret)
 		ob = ERR_PTR(ret);
 
@@ -508,6 +546,8 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 		goto again;
 	}
 
+	*dev_alloc_cursor = alloc_cursor;
+
 	return ob;
 }
 
@@ -537,6 +577,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
 	prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
 	prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow);
 	prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse);
+	prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);
 
 	if (!IS_ERR(ob)) {
 		prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
@@ -571,7 +612,9 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	struct open_bucket *ob = NULL;
 	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
 	u64 avail;
-	struct bucket_alloc_state s = { 0 };
+	struct bucket_alloc_state s = {
+		.btree_bitmap = data_type == BCH_DATA_btree,
+	};
 	bool waiting = false;
 again:
 	bch2_dev_usage_read_fast(ca, usage);
@@ -609,6 +652,11 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	if (s.skipped_need_journal_commit * 2 > avail)
 		bch2_journal_flush_async(&c->journal, NULL);
 
+	if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
+		s.btree_bitmap = BTREE_BITMAP_ANY;
+		goto alloc;
+	}
+
 	if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
 		freespace = false;
 		goto alloc;
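
For illustration, the skip arithmetic above in isolation: when a bucket fails the bitmap preference, the cursor jumps to the first bucket of the next bitmap granule (1 << btree_bitmap_shift sectors) rather than testing every bucket. The bucket_size and btree_bitmap_shift values and the sector/bucket conversions below are simplified stand-ins, not the bch_dev fields and helpers.

	#include <stdint.h>
	#include <stdio.h>

	/* Round up to a power-of-two boundary; same result as the kernel's round_up(). */
	static uint64_t round_up_pow2(uint64_t v, uint64_t align)
	{
		return (v + align - 1) & ~(align - 1);
	}

	int main(void)
	{
		uint64_t bucket_size = 1024;		/* sectors per bucket (assumed) */
		unsigned btree_bitmap_shift = 20;	/* one bitmap bit covers 1 << 20 sectors (assumed) */
		uint64_t bucket = 3000;			/* bucket that failed the bitmap test */

		uint64_t sector      = bucket * bucket_size;
		uint64_t next_sector = round_up_pow2(sector + 1, 1ULL << btree_bitmap_shift);
		uint64_t next_bucket = next_sector / bucket_size;

		/* Prints: skip bucket 3000 -> 3072 (sector 3145728) */
		printf("skip bucket %llu -> %llu (sector %llu)\n",
		       (unsigned long long)bucket,
		       (unsigned long long)next_bucket,
		       (unsigned long long)next_sector);
		return 0;
	}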

fs/bcachefs/alloc_types.h

Lines changed: 7 additions & 0 deletions
@@ -9,11 +9,18 @@
 #include "fifo.h"
 
 struct bucket_alloc_state {
+	enum {
+		BTREE_BITMAP_NO,
+		BTREE_BITMAP_YES,
+		BTREE_BITMAP_ANY,
+	} btree_bitmap;
+
 	u64 buckets_seen;
 	u64 skipped_open;
 	u64 skipped_need_journal_commit;
 	u64 skipped_nocow;
 	u64 skipped_nouse;
+	u64 skipped_mi_btree_bitmap;
 };
 
 #define BCH_WATERMARKS() \

fs/bcachefs/bcachefs.h

Lines changed: 1 addition & 1 deletion
@@ -587,7 +587,7 @@ struct bch_dev {
 
 	/* Allocator: */
 	u64 new_fs_bucket_idx;
-	u64 alloc_cursor;
+	u64 alloc_cursor[3];
 
 	unsigned nr_open_buckets;
 	unsigned nr_btree_reserve;
