@@ -59,12 +59,6 @@
 #define __force_percpu_prefix	"%%"__stringify(__percpu_seg)":"
 #define __my_cpu_offset	this_cpu_read(this_cpu_off)
 
-#ifdef CONFIG_X86_64
-#define __raw_my_cpu_offset	raw_cpu_read_8(this_cpu_off);
-#else
-#define __raw_my_cpu_offset	raw_cpu_read_4(this_cpu_off);
-#endif
-
 /*
  * Compared to the generic __my_cpu_offset version, the following
  * saves one instruction and avoids clobbering a temp register.
@@ -76,7 +70,7 @@
 #ifndef BUILD_VDSO32_64
 #define arch_raw_cpu_ptr(_ptr)					\
 ({								\
-	unsigned long tcp_ptr__ = __raw_my_cpu_offset;		\
+	unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off);	\
 	tcp_ptr__ += (__force unsigned long)(_ptr);		\
 	(typeof(*(_ptr)) __kernel __force *)tcp_ptr__;		\
 })
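The hunks above drop the ad-hoc __raw_my_cpu_offset (and its private CONFIG_X86_64 ifdeffery) in favor of a single raw_cpu_read_long() call; arch_raw_cpu_ptr() itself still just adds this CPU's segment offset to the variable's generic address and casts the sum back to the pointer type. Below is a stand-alone, user-space sketch of that offset-plus-pointer arithmetic, not kernel code: NR_CPUS_DEMO, cpu_area, cpu_offset and cpu_ptr are made-up demo names (typeof is a GCC/clang extension, as in the kernel).

#include <stdio.h>

#define NR_CPUS_DEMO	4

/* One private storage area per simulated CPU. */
static long cpu_area[NR_CPUS_DEMO][64];

/* Byte distance from CPU 0's area to a given CPU's area. */
static unsigned long cpu_offset(int cpu)
{
	return (unsigned long)&cpu_area[cpu][0] -
	       (unsigned long)&cpu_area[0][0];
}

/* Mimics the offset-plus-pointer step of arch_raw_cpu_ptr(). */
#define cpu_ptr(ptr, cpu) \
	((typeof(ptr))((unsigned long)(ptr) + cpu_offset(cpu)))

int main(void)
{
	long *var = &cpu_area[0][3];	/* the variable's "generic" address */

	*cpu_ptr(var, 2) = 42;		/* write CPU 2's instance */

	printf("cpu2: %ld  cpu0: %ld\n", cpu_area[2][3], cpu_area[0][3]);
	return 0;
}

Writing through cpu_ptr(var, 2) only touches CPU 2's copy (prints "cpu2: 42  cpu0: 0"), which is the whole point of the per-CPU offset scheme.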
@@ -563,9 +557,13 @@ do { \
 #define this_cpu_xchg_8(pcp, nval)			this_percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
 #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
+
+#define raw_cpu_read_long(pcp)				raw_cpu_read_8(pcp)
 #else
 /* There is no generic 64 bit read stable operation for 32 bit targets. */
-#define this_cpu_read_stable_8(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })
+#define this_cpu_read_stable_8(pcp)			({ BUILD_BUG(); (typeof(pcp))0; })
+
+#define raw_cpu_read_long(pcp)				raw_cpu_read_4(pcp)
 #endif
 
 #define x86_this_cpu_constant_test_bit(_nr, _var)	\
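This hunk hoists the 32-bit/64-bit split into one place: raw_cpu_read_long() is defined once, next to the existing fixed-size accessors, inside the CONFIG_X86_64 branches that already exist here, so callers such as arch_raw_cpu_ptr() above stay ifdef-free. A minimal compile-and-run sketch of the same pattern, assuming hypothetical read_4/read_8/read_long macros with __LP64__ standing in for CONFIG_X86_64 (none of these are the kernel's definitions):

#include <stdio.h>

/* Fixed-size accessors; the size split lives behind them, once. */
#define read_4(var)	(*(volatile unsigned int *)&(var))
#define read_8(var)	(*(volatile unsigned long long *)&(var))

#ifdef __LP64__				/* stands in for CONFIG_X86_64 */
#define read_long(var)	read_8(var)
#else
#define read_long(var)	read_4(var)
#endif

static unsigned long this_cpu_off_demo = 0xbeef;	/* fake per-CPU offset */

int main(void)
{
	/* One expression serves both word sizes; no #ifdef at the call site. */
	unsigned long off = read_long(this_cpu_off_demo);

	printf("offset = %#lx\n", off);
	return 0;
}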