Skip to content

Commit f1e67e3

Browse files
committed
fs/buffer: Make BH_Uptodate_Lock bit_spin_lock a regular spinlock_t
Bit spinlocks are problematic if PREEMPT_RT is enabled, because they disable preemption, which is undesired for latency reasons and breaks when regular spinlocks are taken within the bit_spinlock locked region because regular spinlocks are converted to 'sleeping spinlocks' on RT. PREEMPT_RT replaced the bit spinlocks with regular spinlocks to avoid this problem. The replacement was done conditionally at compile time, but Christoph requested to do an unconditional conversion. Jan suggested to move the spinlock into an existing padding hole which avoids a size increase of struct buffer_head on production kernels. As a benefit the lock gains lockdep coverage. [ bigeasy: Remove the wrapper and use always spinlock_t and move it into the padding hole ] Signed-off-by: Thomas Gleixner <[email protected]> Signed-off-by: Sebastian Andrzej Siewior <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Reviewed-by: Jan Kara <[email protected]> Cc: Christoph Hellwig <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent fc32150 commit f1e67e3

File tree

4 files changed

+16
-26
lines changed

4 files changed

+16
-26
lines changed

fs/buffer.c

Lines changed: 7 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -274,8 +274,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
274274
* decide that the page is now completely done.
275275
*/
276276
first = page_buffers(page);
277-
local_irq_save(flags);
278-
bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
277+
spin_lock_irqsave(&first->b_uptodate_lock, flags);
279278
clear_buffer_async_read(bh);
280279
unlock_buffer(bh);
281280
tmp = bh;
@@ -288,8 +287,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
288287
}
289288
tmp = tmp->b_this_page;
290289
} while (tmp != bh);
291-
bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
292-
local_irq_restore(flags);
290+
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
293291

294292
/*
295293
* If none of the buffers had errors and they are all
@@ -301,8 +299,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
301299
return;
302300

303301
still_busy:
304-
bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
305-
local_irq_restore(flags);
302+
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
306303
return;
307304
}
308305

@@ -371,8 +368,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
371368
}
372369

373370
first = page_buffers(page);
374-
local_irq_save(flags);
375-
bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
371+
spin_lock_irqsave(&first->b_uptodate_lock, flags);
376372

377373
clear_buffer_async_write(bh);
378374
unlock_buffer(bh);
@@ -384,14 +380,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
384380
}
385381
tmp = tmp->b_this_page;
386382
}
387-
bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
388-
local_irq_restore(flags);
383+
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
389384
end_page_writeback(page);
390385
return;
391386

392387
still_busy:
393-
bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
394-
local_irq_restore(flags);
388+
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
395389
return;
396390
}
397391
EXPORT_SYMBOL(end_buffer_async_write);
@@ -3385,6 +3379,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
33853379
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
33863380
if (ret) {
33873381
INIT_LIST_HEAD(&ret->b_assoc_buffers);
3382+
spin_lock_init(&ret->b_uptodate_lock);
33883383
preempt_disable();
33893384
__this_cpu_inc(bh_accounting.nr);
33903385
recalc_bh_state();

fs/ext4/page-io.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -125,11 +125,10 @@ static void ext4_finish_bio(struct bio *bio)
125125
}
126126
bh = head = page_buffers(page);
127127
/*
128-
* We check all buffers in the page under BH_Uptodate_Lock
128+
* We check all buffers in the page under b_uptodate_lock
129129
* to avoid races with other end io clearing async_write flags
130130
*/
131-
local_irq_save(flags);
132-
bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
131+
spin_lock_irqsave(&head->b_uptodate_lock, flags);
133132
do {
134133
if (bh_offset(bh) < bio_start ||
135134
bh_offset(bh) + bh->b_size > bio_end) {
@@ -141,8 +140,7 @@ static void ext4_finish_bio(struct bio *bio)
141140
if (bio->bi_status)
142141
buffer_io_error(bh);
143142
} while ((bh = bh->b_this_page) != head);
144-
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
145-
local_irq_restore(flags);
143+
spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
146144
if (!under_io) {
147145
fscrypt_free_bounce_page(bounce_page);
148146
end_page_writeback(page);

fs/ntfs/aops.c

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
9292
"0x%llx.", (unsigned long long)bh->b_blocknr);
9393
}
9494
first = page_buffers(page);
95-
local_irq_save(flags);
96-
bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
95+
spin_lock_irqsave(&first->b_uptodate_lock, flags);
9796
clear_buffer_async_read(bh);
9897
unlock_buffer(bh);
9998
tmp = bh;
@@ -108,8 +107,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
108107
}
109108
tmp = tmp->b_this_page;
110109
} while (tmp != bh);
111-
bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
112-
local_irq_restore(flags);
110+
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
113111
/*
114112
* If none of the buffers had errors then we can set the page uptodate,
115113
* but we first have to perform the post read mst fixups, if the
@@ -142,8 +140,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
142140
unlock_page(page);
143141
return;
144142
still_busy:
145-
bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
146-
local_irq_restore(flags);
143+
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
147144
return;
148145
}
149146

include/linux/buffer_head.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,6 @@ enum bh_state_bits {
2222
BH_Dirty, /* Is dirty */
2323
BH_Lock, /* Is locked */
2424
BH_Req, /* Has been submitted for I/O */
25-
BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
26-
* IO completion of other buffers in the page
27-
*/
2825

2926
BH_Mapped, /* Has a disk mapping */
3027
BH_New, /* Disk mapping was newly created by get_block */
@@ -76,6 +73,9 @@ struct buffer_head {
7673
struct address_space *b_assoc_map; /* mapping this buffer is
7774
associated with */
7875
atomic_t b_count; /* users using this buffer_head */
76+
spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
77+
* serialise IO completion of other
78+
* buffers in the page */
7979
};
8080

8181
/*

0 commit comments

Comments
 (0)