Skip to content

Commit e6861be

Browse files
committed
Merge tag 'bcachefs-2023-11-29' of https://evilpiepirate.org/git/bcachefs
Pull more bcachefs bugfixes from Kent Overstreet: - bcache & bcachefs were broken with CFI enabled; patch for closures to fix type punning - mark erasure coding as extra-experimental; there are incompatible disk space accounting changes coming for erasure coding, and I'm still seeing checksum errors in some tests - several fixes for durability-related issues (durability is a device specific setting where we can tell bcachefs that data on a given device should be counted as replicated x times) - a fix for a rare livelock when a btree node merge then updates a parent node that is almost full - fix a race in the device removal path, where dropping a pointer in a btree node to a device would be clobbered by an in flight btree write updating the btree node key on completion - fix one SRCU lock hold time warning in the btree gc code - there's still a bunch more of these to fix - fix a rare race where we'd start copygc before initializing the "are we rw" percpu refcount; copygc would think we were already ro and die immediately * tag 'bcachefs-2023-11-29' of https://evilpiepirate.org/git/bcachefs: (23 commits) bcachefs: Extra kthread_should_stop() calls for copygc bcachefs: Convert gc_alloc_start() to for_each_btree_key2() bcachefs: Fix race between btree writes and metadata drop bcachefs: move journal seq assertion bcachefs: -EROFS doesn't count as move_extent_start_fail bcachefs: trace_move_extent_start_fail() now includes errcode bcachefs: Fix split_race livelock bcachefs: Fix bucket data type for stripe buckets bcachefs: Add missing validation for jset_entry_data_usage bcachefs: Fix zstd compress workspace size bcachefs: bpos is misaligned on big endian bcachefs: Fix ec + durability calculation bcachefs: Data update path won't accidentaly grow replicas bcachefs: deallocate_extra_replicas() bcachefs: Proper refcounting for journal_keys bcachefs: preserve device path as device name bcachefs: Fix an endianness conversion bcachefs: Start gc, copygc, rebalance threads after initing writes ref bcachefs: Don't stop copygc thread on device resize bcachefs: Make sure bch2_move_ratelimit() also waits for move_ops ...
2 parents 994d5c5 + 415e510 commit e6861be

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

45 files changed

+495
-323
lines changed

drivers/md/bcache/btree.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
293293
w->journal = NULL;
294294
}
295295

296-
static void btree_node_write_unlock(struct closure *cl)
296+
static CLOSURE_CALLBACK(btree_node_write_unlock)
297297
{
298-
struct btree *b = container_of(cl, struct btree, io);
298+
closure_type(b, struct btree, io);
299299

300300
up(&b->io_mutex);
301301
}
302302

303-
static void __btree_node_write_done(struct closure *cl)
303+
static CLOSURE_CALLBACK(__btree_node_write_done)
304304
{
305-
struct btree *b = container_of(cl, struct btree, io);
305+
closure_type(b, struct btree, io);
306306
struct btree_write *w = btree_prev_write(b);
307307

308308
bch_bbio_free(b->bio, b->c);
@@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl)
315315
closure_return_with_destructor(cl, btree_node_write_unlock);
316316
}
317317

318-
static void btree_node_write_done(struct closure *cl)
318+
static CLOSURE_CALLBACK(btree_node_write_done)
319319
{
320-
struct btree *b = container_of(cl, struct btree, io);
320+
closure_type(b, struct btree, io);
321321

322322
bio_free_pages(b->bio);
323-
__btree_node_write_done(cl);
323+
__btree_node_write_done(&cl->work);
324324
}
325325

326326
static void btree_node_write_endio(struct bio *bio)

drivers/md/bcache/journal.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio)
723723
closure_put(&w->c->journal.io);
724724
}
725725

726-
static void journal_write(struct closure *cl);
726+
static CLOSURE_CALLBACK(journal_write);
727727

728-
static void journal_write_done(struct closure *cl)
728+
static CLOSURE_CALLBACK(journal_write_done)
729729
{
730-
struct journal *j = container_of(cl, struct journal, io);
730+
closure_type(j, struct journal, io);
731731
struct journal_write *w = (j->cur == j->w)
732732
? &j->w[1]
733733
: &j->w[0];
@@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl)
736736
continue_at_nobarrier(cl, journal_write, bch_journal_wq);
737737
}
738738

739-
static void journal_write_unlock(struct closure *cl)
739+
static CLOSURE_CALLBACK(journal_write_unlock)
740740
__releases(&c->journal.lock)
741741
{
742-
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
742+
closure_type(c, struct cache_set, journal.io);
743743

744744
c->journal.io_in_flight = 0;
745745
spin_unlock(&c->journal.lock);
746746
}
747747

748-
static void journal_write_unlocked(struct closure *cl)
748+
static CLOSURE_CALLBACK(journal_write_unlocked)
749749
__releases(c->journal.lock)
750750
{
751-
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
751+
closure_type(c, struct cache_set, journal.io);
752752
struct cache *ca = c->cache;
753753
struct journal_write *w = c->journal.cur;
754754
struct bkey *k = &c->journal.key;
@@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl)
823823
continue_at(cl, journal_write_done, NULL);
824824
}
825825

826-
static void journal_write(struct closure *cl)
826+
static CLOSURE_CALLBACK(journal_write)
827827
{
828-
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
828+
closure_type(c, struct cache_set, journal.io);
829829

830830
spin_lock(&c->journal.lock);
831-
journal_write_unlocked(cl);
831+
journal_write_unlocked(&cl->work);
832832
}
833833

834834
static void journal_try_write(struct cache_set *c)

drivers/md/bcache/movinggc.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
3535

3636
/* Moving GC - IO loop */
3737

38-
static void moving_io_destructor(struct closure *cl)
38+
static CLOSURE_CALLBACK(moving_io_destructor)
3939
{
40-
struct moving_io *io = container_of(cl, struct moving_io, cl);
40+
closure_type(io, struct moving_io, cl);
4141

4242
kfree(io);
4343
}
4444

45-
static void write_moving_finish(struct closure *cl)
45+
static CLOSURE_CALLBACK(write_moving_finish)
4646
{
47-
struct moving_io *io = container_of(cl, struct moving_io, cl);
47+
closure_type(io, struct moving_io, cl);
4848
struct bio *bio = &io->bio.bio;
4949

5050
bio_free_pages(bio);
@@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io)
8989
bch_bio_map(bio, NULL);
9090
}
9191

92-
static void write_moving(struct closure *cl)
92+
static CLOSURE_CALLBACK(write_moving)
9393
{
94-
struct moving_io *io = container_of(cl, struct moving_io, cl);
94+
closure_type(io, struct moving_io, cl);
9595
struct data_insert_op *op = &io->op;
9696

9797
if (!op->status) {
@@ -113,9 +113,9 @@ static void write_moving(struct closure *cl)
113113
continue_at(cl, write_moving_finish, op->wq);
114114
}
115115

116-
static void read_moving_submit(struct closure *cl)
116+
static CLOSURE_CALLBACK(read_moving_submit)
117117
{
118-
struct moving_io *io = container_of(cl, struct moving_io, cl);
118+
closure_type(io, struct moving_io, cl);
119119
struct bio *bio = &io->bio.bio;
120120

121121
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

drivers/md/bcache/request.c

Lines changed: 37 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525

2626
struct kmem_cache *bch_search_cache;
2727

28-
static void bch_data_insert_start(struct closure *cl);
28+
static CLOSURE_CALLBACK(bch_data_insert_start);
2929

3030
static unsigned int cache_mode(struct cached_dev *dc)
3131
{
@@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
5555

5656
/* Insert data into cache */
5757

58-
static void bch_data_insert_keys(struct closure *cl)
58+
static CLOSURE_CALLBACK(bch_data_insert_keys)
5959
{
60-
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
60+
closure_type(op, struct data_insert_op, cl);
6161
atomic_t *journal_ref = NULL;
6262
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
6363
int ret;
@@ -136,9 +136,9 @@ static void bch_data_invalidate(struct closure *cl)
136136
continue_at(cl, bch_data_insert_keys, op->wq);
137137
}
138138

139-
static void bch_data_insert_error(struct closure *cl)
139+
static CLOSURE_CALLBACK(bch_data_insert_error)
140140
{
141-
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
141+
closure_type(op, struct data_insert_op, cl);
142142

143143
/*
144144
* Our data write just errored, which means we've got a bunch of keys to
@@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
163163

164164
op->insert_keys.top = dst;
165165

166-
bch_data_insert_keys(cl);
166+
bch_data_insert_keys(&cl->work);
167167
}
168168

169169
static void bch_data_insert_endio(struct bio *bio)
@@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
184184
bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
185185
}
186186

187-
static void bch_data_insert_start(struct closure *cl)
187+
static CLOSURE_CALLBACK(bch_data_insert_start)
188188
{
189-
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
189+
closure_type(op, struct data_insert_op, cl);
190190
struct bio *bio = op->bio, *n;
191191

192192
if (op->bypass)
@@ -305,16 +305,16 @@ static void bch_data_insert_start(struct closure *cl)
305305
* If op->bypass is true, instead of inserting the data it invalidates the
306306
* region of the cache represented by op->bio and op->inode.
307307
*/
308-
void bch_data_insert(struct closure *cl)
308+
CLOSURE_CALLBACK(bch_data_insert)
309309
{
310-
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
310+
closure_type(op, struct data_insert_op, cl);
311311

312312
trace_bcache_write(op->c, op->inode, op->bio,
313313
op->writeback, op->bypass);
314314

315315
bch_keylist_init(&op->insert_keys);
316316
bio_get(op->bio);
317-
bch_data_insert_start(cl);
317+
bch_data_insert_start(&cl->work);
318318
}
319319

320320
/*
@@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
575575
return n == bio ? MAP_DONE : MAP_CONTINUE;
576576
}
577577

578-
static void cache_lookup(struct closure *cl)
578+
static CLOSURE_CALLBACK(cache_lookup)
579579
{
580-
struct search *s = container_of(cl, struct search, iop.cl);
580+
closure_type(s, struct search, iop.cl);
581581
struct bio *bio = &s->bio.bio;
582582
struct cached_dev *dc;
583583
int ret;
@@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
698698
bio_cnt_set(bio, 3);
699699
}
700700

701-
static void search_free(struct closure *cl)
701+
static CLOSURE_CALLBACK(search_free)
702702
{
703-
struct search *s = container_of(cl, struct search, cl);
703+
closure_type(s, struct search, cl);
704704

705705
atomic_dec(&s->iop.c->search_inflight);
706706

@@ -749,33 +749,33 @@ static inline struct search *search_alloc(struct bio *bio,
749749

750750
/* Cached devices */
751751

752-
static void cached_dev_bio_complete(struct closure *cl)
752+
static CLOSURE_CALLBACK(cached_dev_bio_complete)
753753
{
754-
struct search *s = container_of(cl, struct search, cl);
754+
closure_type(s, struct search, cl);
755755
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
756756

757757
cached_dev_put(dc);
758-
search_free(cl);
758+
search_free(&cl->work);
759759
}
760760

761761
/* Process reads */
762762

763-
static void cached_dev_read_error_done(struct closure *cl)
763+
static CLOSURE_CALLBACK(cached_dev_read_error_done)
764764
{
765-
struct search *s = container_of(cl, struct search, cl);
765+
closure_type(s, struct search, cl);
766766

767767
if (s->iop.replace_collision)
768768
bch_mark_cache_miss_collision(s->iop.c, s->d);
769769

770770
if (s->iop.bio)
771771
bio_free_pages(s->iop.bio);
772772

773-
cached_dev_bio_complete(cl);
773+
cached_dev_bio_complete(&cl->work);
774774
}
775775

776-
static void cached_dev_read_error(struct closure *cl)
776+
static CLOSURE_CALLBACK(cached_dev_read_error)
777777
{
778-
struct search *s = container_of(cl, struct search, cl);
778+
closure_type(s, struct search, cl);
779779
struct bio *bio = &s->bio.bio;
780780

781781
/*
@@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
801801
continue_at(cl, cached_dev_read_error_done, NULL);
802802
}
803803

804-
static void cached_dev_cache_miss_done(struct closure *cl)
804+
static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
805805
{
806-
struct search *s = container_of(cl, struct search, cl);
806+
closure_type(s, struct search, cl);
807807
struct bcache_device *d = s->d;
808808

809809
if (s->iop.replace_collision)
@@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
812812
if (s->iop.bio)
813813
bio_free_pages(s->iop.bio);
814814

815-
cached_dev_bio_complete(cl);
815+
cached_dev_bio_complete(&cl->work);
816816
closure_put(&d->cl);
817817
}
818818

819-
static void cached_dev_read_done(struct closure *cl)
819+
static CLOSURE_CALLBACK(cached_dev_read_done)
820820
{
821-
struct search *s = container_of(cl, struct search, cl);
821+
closure_type(s, struct search, cl);
822822
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
823823

824824
/*
@@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
858858
continue_at(cl, cached_dev_cache_miss_done, NULL);
859859
}
860860

861-
static void cached_dev_read_done_bh(struct closure *cl)
861+
static CLOSURE_CALLBACK(cached_dev_read_done_bh)
862862
{
863-
struct search *s = container_of(cl, struct search, cl);
863+
closure_type(s, struct search, cl);
864864
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
865865

866866
bch_mark_cache_accounting(s->iop.c, s->d,
@@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
955955

956956
/* Process writes */
957957

958-
static void cached_dev_write_complete(struct closure *cl)
958+
static CLOSURE_CALLBACK(cached_dev_write_complete)
959959
{
960-
struct search *s = container_of(cl, struct search, cl);
960+
closure_type(s, struct search, cl);
961961
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
962962

963963
up_read_non_owner(&dc->writeback_lock);
964-
cached_dev_bio_complete(cl);
964+
cached_dev_bio_complete(&cl->work);
965965
}
966966

967967
static void cached_dev_write(struct cached_dev *dc, struct search *s)
@@ -1048,9 +1048,9 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
10481048
continue_at(cl, cached_dev_write_complete, NULL);
10491049
}
10501050

1051-
static void cached_dev_nodata(struct closure *cl)
1051+
static CLOSURE_CALLBACK(cached_dev_nodata)
10521052
{
1053-
struct search *s = container_of(cl, struct search, cl);
1053+
closure_type(s, struct search, cl);
10541054
struct bio *bio = &s->bio.bio;
10551055

10561056
if (s->iop.flush_journal)
@@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
12651265
return MAP_CONTINUE;
12661266
}
12671267

1268-
static void flash_dev_nodata(struct closure *cl)
1268+
static CLOSURE_CALLBACK(flash_dev_nodata)
12691269
{
1270-
struct search *s = container_of(cl, struct search, cl);
1270+
closure_type(s, struct search, cl);
12711271

12721272
if (s->iop.flush_journal)
12731273
bch_journal_meta(s->iop.c, cl);

drivers/md/bcache/request.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ struct data_insert_op {
3434
};
3535

3636
unsigned int bch_get_congested(const struct cache_set *c);
37-
void bch_data_insert(struct closure *cl);
37+
CLOSURE_CALLBACK(bch_data_insert);
3838

3939
void bch_cached_dev_request_init(struct cached_dev *dc);
4040
void cached_dev_submit_bio(struct bio *bio);

0 commit comments

Comments
 (0)