@@ -230,28 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
 
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
- * particular ordering. One way to make the compiler aware of ordering is to
- * put the two invocations of READ_ONCE or WRITE_ONCE in different C
- * statements.
- *
- * These two macros will also work on aggregate data types like structs or
- * unions.
- *
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- */
-#include <asm/barrier.h>
-#include <linux/kasan-checks.h>
-#include <linux/kcsan-checks.h>
-
 /**
  * data_race - mark an expression as containing intentional data races
  *
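Reviewer note: the block comment deleted in this hunk spells out use case (1), process-level code talking to an irq/NMI handler on the same CPU. As a hedged illustration only (plain userspace C, not kernel code; the SKETCH_* macros, alarm_handler() and need_work are invented for this sketch and merely stand in for READ_ONCE()/WRITE_ONCE()), the same idea looks like this:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Volatile-cast accesses mimicking what READ_ONCE()/WRITE_ONCE() guarantee. */
#define SKETCH_READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define SKETCH_WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static int need_work;	/* shared with the "interrupt" handler below */

static void alarm_handler(int sig)
{
	(void)sig;
	SKETCH_WRITE_ONCE(need_work, 1);	/* handler side publishes the flag */
}

int main(void)
{
	signal(SIGALRM, alarm_handler);
	alarm(1);

	/*
	 * Without the volatile-qualified read the compiler could hoist the
	 * load of need_work out of the loop and spin forever.
	 */
	while (!SKETCH_READ_ONCE(need_work))
		;

	puts("flag observed");
	return 0;
}

The volatile casts are what stop the compiler from merging, refetching or hoisting the flag accesses, which is exactly the guarantee the deleted comment describes; typeof() assumes a GNU C compiler, as the kernel itself does.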
@@ -272,65 +250,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	__v;								\
 })
 
-/*
- * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
- * atomicity or dependency ordering guarantees. Note that this may result
- * in tears!
- */
-#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
-
-#define __READ_ONCE_SCALAR(x)						\
-({									\
-	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);			\
-	smp_read_barrier_depends();					\
-	(typeof(x))__x;							\
-})
-
-#define READ_ONCE(x)							\
-({									\
-	compiletime_assert_rwonce_type(x);				\
-	__READ_ONCE_SCALAR(x);						\
-})
-
-#define __WRITE_ONCE(x, val)						\
-do {									\
-	*(volatile typeof(x) *)&(x) = (val);				\
-} while (0)
-
-#define WRITE_ONCE(x, val)						\
-do {									\
-	compiletime_assert_rwonce_type(x);				\
-	__WRITE_ONCE(x, val);						\
-} while (0)
-
-static __no_sanitize_or_inline
-unsigned long __read_once_word_nocheck(const void *addr)
-{
-	return __READ_ONCE(*(unsigned long *)addr);
-}
-
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
- * word from memory atomically but without telling KASAN/KCSAN. This is
- * usually used by unwinding code when walking the stack of a running process.
- */
-#define READ_ONCE_NOCHECK(x)						\
-({									\
-	unsigned long __x;						\
-	compiletime_assert(sizeof(x) == sizeof(__x),			\
-		"Unsupported access size for READ_ONCE_NOCHECK().");	\
-	__x = __read_once_word_nocheck(&(x));				\
-	smp_read_barrier_depends();					\
-	(typeof(x))__x;							\
-})
-
-static __no_kasan_or_inline
-unsigned long read_word_at_a_time(const void *addr)
-{
-	kasan_check_read(addr, 1);
-	return *(unsigned long *)addr;
-}
-
 #endif /* __KERNEL__ */
 
 /*
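Reviewer note: to make the deleted definitions easier to follow, here is a hedged, hand-expanded sketch of what READ_ONCE(head->next) reduces to under them; struct node and read_next() are made-up illustration names, and the compile-time size check is shown only as a comment:

struct node { struct node *next; };

/* Roughly what READ_ONCE(head->next) boiled down to under the macros above. */
struct node *read_next(struct node *head)
{
	/* compiletime_assert_rwonce_type(head->next);  -- size check only */
	struct node *__x =
		*(struct node * const volatile *)&head->next;	/* __READ_ONCE() */
	/* smp_read_barrier_depends();  -- historically a real barrier only on Alpha */
	return (struct node *)__x;	/* the (typeof(x)) cast in __READ_ONCE_SCALAR() */
}

__unqual_scalar_typeof() strips qualifiers so the temporary is a plain scalar copy; the load itself is just the volatile pointer dereference.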
@@ -395,16 +314,6 @@ static inline void *offset_to_ptr(const int *off)
 	compiletime_assert(__native_word(t),				\
 		"Need native word sized stores/loads for atomicity.")
 
-/*
- * Yes, this permits 64-bit accesses on 32-bit architectures. These will
- * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
- * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
- * (e.g. a virtual address) and a strong prevailing wind.
- */
-#define compiletime_assert_rwonce_type(t)				\
-	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
-		"Unsupported access size for {READ,WRITE}_ONCE().")
-
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
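Reviewer note: the deleted comment explains why sizeof(long long) is accepted alongside the native word sizes. A hedged userspace sketch of the same check (the sketch_* names are invented; __native_word() is mirrored here as the usual char/short/int/long size test from compiler_types.h):

#include <assert.h>

/* Mirrors __native_word(): object is char-, short-, int- or long-sized. */
#define sketch_native_word(t)						\
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) ||	\
	 sizeof(t) == sizeof(int)  || sizeof(t) == sizeof(long))

/* The removed check: native word sizes, plus sizeof(long long). */
#define sketch_assert_rwonce_type(t)					\
	static_assert(sketch_native_word(t) ||				\
		      sizeof(t) == sizeof(long long),			\
		      "Unsupported access size for {READ,WRITE}_ONCE().")

struct two_words { long a; long b; };

sketch_assert_rwonce_type(unsigned long long);	/* accepted even on 32-bit */
/* sketch_assert_rwonce_type(struct two_words);	-- would fail to build */

On a 32-bit build the unsigned long long case is exactly the "64-bit accesses on 32-bit architectures" situation the deleted comment is hedging about.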
@@ -414,4 +323,6 @@ static inline void *offset_to_ptr(const int *off)
  */
 #define prevent_tail_call_optimization()	mb()
 
+#include <asm/rwonce.h>
+
 #endif /* __LINUX_COMPILER_H */
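Reviewer note: the only addition in the whole diff is the #include <asm/rwonce.h> above, which is presumably where the {READ,WRITE}_ONCE() machinery removed earlier now comes from, so code that includes <linux/compiler.h> should see no interface change. The new header is not shown in this diff, so purely as a hypothetical sketch of the conventional overridable-header pattern (typeof() used for brevity instead of the kernel's __unqual_scalar_typeof(), and none of this is the actual file), such a header might look like:

/* Hypothetical sketch only -- not the real <asm/rwonce.h> from this change. */
#ifndef __SKETCH_ASM_RWONCE_H
#define __SKETCH_ASM_RWONCE_H

/* An architecture header included earlier may supply its own __READ_ONCE(). */
#ifndef __READ_ONCE
#define __READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))
#endif

#define READ_ONCE(x)	__READ_ONCE(x)

#ifndef __WRITE_ONCE
#define __WRITE_ONCE(x, val)					\
do {								\
	*(volatile typeof(x) *)&(x) = (val);			\
} while (0)
#endif

#define WRITE_ONCE(x, val)	__WRITE_ONCE(x, val)

#endif /* __SKETCH_ASM_RWONCE_H */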