Commit e506ea4

compiler.h: Split {READ,WRITE}_ONCE definitions out into rwonce.h
In preparation for allowing architectures to define their own
implementation of the READ_ONCE() macro, move the generic
{READ,WRITE}_ONCE() definitions out of the unwieldy 'linux/compiler.h'
file and into a new 'rwonce.h' header under 'asm-generic'.

Acked-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent f143c11 commit e506ea4
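
For context: with 'mandatory-y += rwonce.h' added to include/asm-generic/Kbuild below, an architecture that does not provide its own <asm/rwonce.h> gets a generated wrapper around the asm-generic version, so existing users keep working. An architecture that later wants its own READ_ONCE() implementation would ship its own header; a minimal sketch of what such an override might look like (the 'foo' architecture and the guard name are purely illustrative, not part of this commit):

/* arch/foo/include/asm/rwonce.h - hypothetical skeleton for an arch override. */
#ifndef __ASM_RWONCE_H
#define __ASM_RWONCE_H

/*
 * An architecture needing special behaviour could define READ_ONCE() and
 * friends here; this skeleton simply falls back to the generic definitions.
 */
#include <asm-generic/rwonce.h>

#endif /* __ASM_RWONCE_H */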

File tree

4 files changed: +105 -92 lines changed

include/asm-generic/Kbuild

Lines changed: 1 addition & 0 deletions

@@ -45,6 +45,7 @@ mandatory-y += pci.h
 mandatory-y += percpu.h
 mandatory-y += pgalloc.h
 mandatory-y += preempt.h
+mandatory-y += rwonce.h
 mandatory-y += sections.h
 mandatory-y += serial.h
 mandatory-y += shmparam.h
include/asm-generic/barrier.h

Lines changed: 1 addition & 1 deletion

@@ -13,7 +13,7 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/compiler.h>
+#include <asm/rwonce.h>
 
 #ifndef nop
 #define nop()	asm volatile ("nop")
include/asm-generic/rwonce.h

Lines changed: 101 additions & 0 deletions

@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#ifndef __ASM_GENERIC_RWONCE_H
+#define __ASM_GENERIC_RWONCE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler_types.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+#include <asm/barrier.h>
+
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t)					\
+	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
+		"Unsupported access size for {READ,WRITE}_ONCE().")
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity or dependency ordering guarantees. Note that this may result
+ * in tears!
+ */
+#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
+
+#define __READ_ONCE_SCALAR(x)						\
+({									\
+	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);			\
+	smp_read_barrier_depends();					\
+	(typeof(x))__x;							\
+})
+
+#define READ_ONCE(x)							\
+({									\
+	compiletime_assert_rwonce_type(x);				\
+	__READ_ONCE_SCALAR(x);						\
+})
+
+#define __WRITE_ONCE(x, val)						\
+do {									\
+	*(volatile typeof(x) *)&(x) = (val);				\
+} while (0)
+
+#define WRITE_ONCE(x, val)						\
+do {									\
+	compiletime_assert_rwonce_type(x);				\
+	__WRITE_ONCE(x, val);						\
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+	return __READ_ONCE(*(unsigned long *)addr);
+}
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
+ */
+#define READ_ONCE_NOCHECK(x)						\
+({									\
+	unsigned long __x;						\
+	compiletime_assert(sizeof(x) == sizeof(__x),			\
+		"Unsupported access size for READ_ONCE_NOCHECK().");	\
+	__x = __read_once_word_nocheck(&(x));				\
+	smp_read_barrier_depends();					\
+	(typeof(x))__x;							\
+})
+
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+	kasan_check_read(addr, 1);
+	return *(unsigned long *)addr;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_RWONCE_H */
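
As a quick illustration of use case (1) from the header comment above (mediating communication between process-level code and an irq handler on the same CPU), here is a minimal, hypothetical driver snippet; the 'data_ready' flag and the foo_* functions are invented for illustration and are not part of this commit:

/* Hypothetical example of READ_ONCE()/WRITE_ONCE() mediating a shared flag. */
#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
#include <asm/processor.h>	/* cpu_relax() */

static int data_ready;

static irqreturn_t foo_irq_handler(int irq, void *dev)
{
	/* The compiler may not tear, merge or elide this store. */
	WRITE_ONCE(data_ready, 1);
	return IRQ_HANDLED;
}

static void foo_wait_for_data(void)
{
	/* Forces a fresh load on each iteration instead of hoisting it out of the loop. */
	while (!READ_ONCE(data_ready))
		cpu_relax();
}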

include/linux/compiler.h

Lines changed: 2 additions & 91 deletions

@@ -230,28 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
 
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
- * particular ordering. One way to make the compiler aware of ordering is to
- * put the two invocations of READ_ONCE or WRITE_ONCE in different C
- * statements.
- *
- * These two macros will also work on aggregate data types like structs or
- * unions.
- *
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- */
-#include <asm/barrier.h>
-#include <linux/kasan-checks.h>
-#include <linux/kcsan-checks.h>
-
 /**
  * data_race - mark an expression as containing intentional data races
  *
@@ -272,65 +250,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	__v;								\
 })
 
-/*
- * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
- * atomicity or dependency ordering guarantees. Note that this may result
- * in tears!
- */
-#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
-
-#define __READ_ONCE_SCALAR(x)						\
-({									\
-	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);			\
-	smp_read_barrier_depends();					\
-	(typeof(x))__x;							\
-})
-
-#define READ_ONCE(x)							\
-({									\
-	compiletime_assert_rwonce_type(x);				\
-	__READ_ONCE_SCALAR(x);						\
-})
-
-#define __WRITE_ONCE(x, val)						\
-do {									\
-	*(volatile typeof(x) *)&(x) = (val);				\
-} while (0)
-
-#define WRITE_ONCE(x, val)						\
-do {									\
-	compiletime_assert_rwonce_type(x);				\
-	__WRITE_ONCE(x, val);						\
-} while (0)
-
-static __no_sanitize_or_inline
-unsigned long __read_once_word_nocheck(const void *addr)
-{
-	return __READ_ONCE(*(unsigned long *)addr);
-}
-
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
- * word from memory atomically but without telling KASAN/KCSAN. This is
- * usually used by unwinding code when walking the stack of a running process.
- */
-#define READ_ONCE_NOCHECK(x)						\
-({									\
-	unsigned long __x;						\
-	compiletime_assert(sizeof(x) == sizeof(__x),			\
-		"Unsupported access size for READ_ONCE_NOCHECK().");	\
-	__x = __read_once_word_nocheck(&(x));				\
-	smp_read_barrier_depends();					\
-	(typeof(x))__x;							\
-})
-
-static __no_kasan_or_inline
-unsigned long read_word_at_a_time(const void *addr)
-{
-	kasan_check_read(addr, 1);
-	return *(unsigned long *)addr;
-}
-
 #endif /* __KERNEL__ */
 
 /*
@@ -395,16 +314,6 @@ static inline void *offset_to_ptr(const int *off)
 	compiletime_assert(__native_word(t),				\
 		"Need native word sized stores/loads for atomicity.")
 
-/*
- * Yes, this permits 64-bit accesses on 32-bit architectures. These will
- * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
- * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
- * (e.g. a virtual address) and a strong prevailing wind.
- */
-#define compiletime_assert_rwonce_type(t)					\
-	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
-		"Unsupported access size for {READ,WRITE}_ONCE().")
-
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
@@ -414,4 +323,6 @@ static inline void *offset_to_ptr(const int *off)
  */
 #define prevent_tail_call_optimization()	mb()
 
+#include <asm/rwonce.h>
+
 #endif /* __LINUX_COMPILER_H */