
Commit 8238b45

Mikulas Patocka authored and Linus Torvalds committed
wait_on_bit: add an acquire memory barrier
There are several places in the kernel where wait_on_bit is not followed by a
memory barrier (for example, in drivers/md/dm-bufio.c:new_read).

On architectures with weak memory ordering, it may happen that memory accesses
that follow wait_on_bit are reordered before wait_on_bit and they may return
invalid data.

Fix this class of bugs by introducing a new function "test_bit_acquire" that
works like test_bit, but has acquire memory ordering semantics.

Signed-off-by: Mikulas Patocka <[email protected]>
Acked-by: Will Deacon <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 4c61282 commit 8238b45
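To make the bug class concrete, here is a minimal sketch of the consumer pattern the commit message refers to. The structure, field names and bit number are hypothetical (drivers/md/dm-bufio.c uses its own); the point is that with a plain test_bit() inside wait_on_bit(), a weakly ordered CPU may load the payload before the bit test, whereas the acquire semantics added by this commit forbid that reordering.

#include <linux/wait_bit.h>
#include <linux/sched.h>

/* Hypothetical buffer type for illustration only. */
struct demo_buffer {
	unsigned long state;	/* bit 0 set while a read is in flight */
	void *data;
};
#define DEMO_READING 0

/*
 * Modelled loosely on drivers/md/dm-bufio.c:new_read(): sleep until the
 * read completes, then hand out the payload.  The load of b->data must
 * not be reordered before the bit test; test_bit_acquire() inside
 * wait_on_bit() now guarantees that.
 */
static void *demo_buffer_data(struct demo_buffer *b)
{
	wait_on_bit(&b->state, DEMO_READING, TASK_UNINTERRUPTIBLE);
	return b->data;
}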

File tree

10 files changed, +60 −12 lines

Documentation/atomic_bitops.txt

Lines changed: 4 additions & 6 deletions
@@ -58,13 +58,11 @@ Like with atomic_t, the rule of thumb is:
 
  - RMW operations that have a return value are fully ordered.
 
- - RMW operations that are conditional are unordered on FAILURE,
-   otherwise the above rules apply. In the case of test_and_set_bit_lock(),
-   if the bit in memory is unchanged by the operation then it is deemed to have
-   failed.
+ - RMW operations that are conditional are fully ordered.
 
-Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
-clear_bit_unlock() which has RELEASE semantics.
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics,
+clear_bit_unlock() which has RELEASE semantics and test_bit_acquire which has
+ACQUIRE semantics.
 
 Since a platform only has a single means of achieving atomic operations
 the same barriers as for atomic_t are used, see atomic_t.txt.
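A minimal sketch of the pairing the updated documentation describes, with hypothetical names: clear_bit_unlock() publishes a payload with RELEASE ordering, and test_bit_acquire() observes it with ACQUIRE ordering, so a reader that sees the bit clear is guaranteed to see the payload.

#include <linux/bitops.h>

/* Hypothetical flag word and payload for illustration only. */
static unsigned long demo_flags = BIT(0);	/* bit 0 = "busy" */
static int demo_payload;

/* Writer: the RELEASE in clear_bit_unlock() orders the payload store
 * before the bit becomes visible as clear. */
static void demo_publish(int value)
{
	demo_payload = value;
	clear_bit_unlock(0, &demo_flags);
}

/* Reader: the ACQUIRE in test_bit_acquire() orders the payload load
 * after the observation that the bit is clear. */
static int demo_consume(void)
{
	while (test_bit_acquire(0, &demo_flags))
		;	/* busy-wait, kept trivial for illustration */
	return demo_payload;
}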

arch/x86/include/asm/bitops.h

Lines changed: 21 additions & 0 deletions
@@ -207,6 +207,20 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+	bool oldbit;
+
+	asm volatile("testb %2,%1"
+		     CC_SET(nz)
+		     : CC_OUT(nz) (oldbit)
+		     : "m" (((unsigned char *)addr)[nr >> 3]),
+		       "i" (1 << (nr & 7))
+		     :"memory");
+
+	return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	bool oldbit;
@@ -226,6 +240,13 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 		  variable_test_bit(nr, addr);
 }
 
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search

include/asm-generic/bitops/generic-non-atomic.h

Lines changed: 14 additions & 0 deletions
@@ -4,6 +4,7 @@
 #define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
 
 #include <linux/bits.h>
+#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
@@ -127,6 +128,18 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * const_*() definitions provide good compile-time optimizations when
  * the passed arguments can be resolved at compile time.
@@ -137,6 +150,7 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
 #define const___test_and_set_bit	generic___test_and_set_bit
 #define const___test_and_clear_bit	generic___test_and_clear_bit
 #define const___test_and_change_bit	generic___test_and_change_bit
+#define const_test_bit_acquire		generic_test_bit_acquire
 
 /**
  * const_test_bit - Determine whether a bit is set
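For readers without a kernel tree at hand, a rough user-space analogue of generic_test_bit_acquire() can be written with C11 atomics; memory_order_acquire plays the role of smp_load_acquire() here. This is an illustration only, not kernel code, and the helper name is made up.

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

#define DEMO_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Rough user-space analogue of generic_test_bit_acquire(): the acquire
 * load orders all later memory accesses after the bit test. */
static bool demo_test_bit_acquire(unsigned long nr,
				  const _Atomic unsigned long *addr)
{
	unsigned long word = atomic_load_explicit(&addr[nr / DEMO_BITS_PER_LONG],
						  memory_order_acquire);
	return 1UL & (word >> (nr % DEMO_BITS_PER_LONG));
}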

include/asm-generic/bitops/instrumented-non-atomic.h

Lines changed: 12 additions & 0 deletions
@@ -142,4 +142,16 @@ _test_bit(unsigned long nr, const volatile unsigned long *addr)
 	return arch_test_bit(nr, addr);
 }
 
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_bit_acquire(nr, addr);
+}
+
 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */

include/asm-generic/bitops/non-atomic.h

Lines changed: 1 addition & 0 deletions
@@ -13,6 +13,7 @@
 #define arch___test_and_change_bit generic___test_and_change_bit
 
 #define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 #include <asm-generic/bitops/non-instrumented-non-atomic.h>
 

include/asm-generic/bitops/non-instrumented-non-atomic.h

Lines changed: 1 addition & 0 deletions
@@ -12,5 +12,6 @@
 #define ___test_and_change_bit		arch___test_and_change_bit
 
 #define _test_bit			arch_test_bit
+#define _test_bit_acquire		arch_test_bit_acquire
 
 #endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */

include/linux/bitops.h

Lines changed: 1 addition & 0 deletions
@@ -59,6 +59,7 @@ extern unsigned long __sw_hweight64(__u64 w);
 #define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
 #define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
 #define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr)	bitop(_test_bit_acquire, nr, addr)
 
 /*
  * Include this here because some architectures need generic_ffs/fls in

include/linux/buffer_head.h

Lines changed: 1 addition & 1 deletion
@@ -156,7 +156,7 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
 	 * make it consistent with folio_test_uptodate
 	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
 	 */
-	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+	return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

include/linux/wait_bit.h

Lines changed: 4 additions & 4 deletions
@@ -71,7 +71,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait,
@@ -96,7 +96,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
 				       bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
 		    unsigned long timeout)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_timeout(word, bit,
 					       bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
 		   unsigned mode)
 {
 	might_sleep();
-	if (!test_bit(bit, word))
+	if (!test_bit_acquire(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit, action, mode);
 }
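All four wait_on_bit() helpers now test the bit with acquire semantics before deciding whether to sleep. For completeness, a sketch of the usual waker-side counterpart, with hypothetical names and following the clear-bit/barrier/wake_up_bit pattern described in the kernel's wait_bit documentation; the acquire test in the waiter pairs with the ordering on this side.

#include <linux/wait_bit.h>
#include <linux/bitops.h>
#include <linux/sched.h>

/* Hypothetical completion flag for illustration only. */
static unsigned long demo_io_state = BIT(0);
#define DEMO_IO_PENDING 0

/* Waker side: clear the bit, then wake anyone sleeping in wait_on_bit().
 * The barrier orders the clear (and any earlier result stores) before
 * the wakeup's internal waitqueue check. */
static void demo_io_complete(void)
{
	clear_bit(DEMO_IO_PENDING, &demo_io_state);
	smp_mb__after_atomic();
	wake_up_bit(&demo_io_state, DEMO_IO_PENDING);
}

/* Waiter side: returns once the bit is clear; with this commit the bit
 * test has acquire semantics, so reads that follow it cannot be hoisted
 * above the test. */
static int demo_io_wait(void)
{
	return wait_on_bit(&demo_io_state, DEMO_IO_PENDING, TASK_UNINTERRUPTIBLE);
}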

kernel/sched/wait_bit.c

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_
 		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
 		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
 			ret = (*action)(&wbq_entry->key, mode);
-	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+	} while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
 
 	finish_wait(wq_head, &wbq_entry->wq_entry);
 
