#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
- #define __percpu_seg gs
- #define __percpu_rel (%rip)
+ # define __percpu_seg gs
+ # define __percpu_rel (%rip)
#else
- #define __percpu_seg fs
- #define __percpu_rel
+ # define __percpu_seg fs
+ # define __percpu_rel
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
- #define __percpu %__percpu_seg:
+ # define __percpu %__percpu_seg:
#else
- #define __percpu
+ # define __percpu
#endif

#define PER_CPU_VAR(var) __percpu(var)__percpu_rel

#ifdef CONFIG_X86_64_SMP
- #define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+ # define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
- #define INIT_PER_CPU_VAR(var) var
+ # define INIT_PER_CPU_VAR(var) var
#endif

- #else /* ...!ASSEMBLY */
+ #else /* !__ASSEMBLY__: */

#include <linux/build_bug.h>
#include <linux/stringify.h>

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
- #define __seg_gs __attribute__((address_space(__seg_gs)))
- #define __seg_fs __attribute__((address_space(__seg_fs)))
+ # define __seg_gs __attribute__((address_space(__seg_gs)))
+ # define __seg_fs __attribute__((address_space(__seg_fs)))
#endif

#ifdef CONFIG_X86_64
- #define __percpu_seg_override __seg_gs
+ # define __percpu_seg_override __seg_gs
#else
- #define __percpu_seg_override __seg_fs
+ # define __percpu_seg_override __seg_fs
#endif

#define __percpu_prefix ""
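
For illustration only (not part of this patch): with compiler named-address-space support, a per-CPU access is just a dereference of a __seg_gs-qualified pointer and the compiler emits the %gs prefix itself. A minimal stand-alone sketch (hypothetical helper name; GCC 6+ or a compatible Clang on x86-64 assumed):

/*
 * Hypothetical sketch -- not kernel code. Dereferencing a __seg_gs-qualified
 * pointer makes the compiler generate a %gs-prefixed load, which is how
 * per-CPU data is reached when CONFIG_USE_X86_SEG_SUPPORT is enabled.
 */
static unsigned long demo_read_gs(const unsigned long __seg_gs *p)
{
	return *p;	/* e.g. "mov %gs:(%rdi), %rax" */
}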

- #else /* CONFIG_CC_HAS_NAMED_AS */
+ #else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_seg_override
#define __percpu_prefix "%%"__stringify(__percpu_seg)":"

#define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel

- #else /* CONFIG_SMP */
+ #else /* !CONFIG_SMP: */
+
#define __percpu_seg_override
#define __percpu_prefix ""
#define __force_percpu_prefix ""
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x

/*
- * Initialized pointers to per-cpu variables needed for the boot
+ * Initialized pointers to per-CPU variables needed for the boot
* processor need to use these macros to get the proper address
* offset from __per_cpu_load on SMP.
*

extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
- #define init_per_cpu_var(var) init_per_cpu__##var
+ # define init_per_cpu_var(var) init_per_cpu__##var
#else
- #define init_per_cpu_var(var) var
+ # define init_per_cpu_var(var) var
#endif
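
For illustration only (not part of this patch): a hypothetical pointer that the boot CPU needs before the real per-CPU areas are allocated would go through init_per_cpu_var(), so that on 64-bit SMP it refers to the init image laid out from __per_cpu_load. All names below are invented, kernel context is assumed, and the matching init_per_cpu__demo_var alias would also have to be provided by the linker script:

/* Hypothetical sketch -- invented names, kernel context assumed. */
DECLARE_PER_CPU(unsigned long, demo_var);
DECLARE_INIT_PER_CPU(demo_var);		/* declares init_per_cpu__demo_var */

/* Resolves to &init_per_cpu__demo_var on 64-bit SMP, &demo_var otherwise. */
static unsigned long *demo_boot_ptr = &init_per_cpu_var(demo_var);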

- /* For arch-specific code, we can use direct single-insn ops (they
- * don't give an lvalue though). */
+ /*
+ * For arch-specific code, we can use direct single-insn ops (they
+ * don't give an lvalue though).
+ */
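
For illustration only (not part of this patch), a hypothetical use of such a direct single-instruction op (kernel context assumed, names invented):

/*
 * Hypothetical sketch: this_cpu_write() emits a single store to %gs-based
 * memory; note the op is not an lvalue, so it cannot be assigned through.
 */
DEFINE_PER_CPU(int, demo_flag);

static void demo_set_flag(int v)
{
	this_cpu_write(demo_flag, v);
}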

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
@@ -158,7 +161,7 @@ do { \

#define __raw_cpu_read_const(pcp) __raw_cpu_read(, , pcp)

- #else /* CONFIG_USE_X86_SEG_SUPPORT */
+ #else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read(size, qual, _var) \
({ \
@@ -183,7 +186,7 @@ do { \
} while (0)

/*
- * The generic per-cpu infrastrucutre is not suitable for
+ * The generic per-CPU infrastructure is not suitable for
* reading const-qualified variables.
*/
#define __raw_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })
@@ -219,7 +222,7 @@ do { \
} while (0)

/*
- * Generate a percpu add to memory instruction and optimize code
+ * Generate a per-CPU add to memory instruction and optimize code
* if one is added or subtracted.
*/
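
For illustration only (not part of this patch): the percpu_add_op() machinery defined next backs the generic this_cpu_add()/this_cpu_inc() operations. A hypothetical usage sketch (kernel context assumed, names invented):

/*
 * Hypothetical sketch: a per-CPU counter bumped with a single add-to-memory
 * instruction; a constant +1/-1 lets the macro emit the inc/dec form instead.
 */
DEFINE_PER_CPU(unsigned long, demo_hits);

static void demo_count_hit(void)
{
	this_cpu_add(demo_hits, 1);	/* e.g. "incq %gs:demo_hits(%rip)" */
}
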
#define percpu_add_op(size, qual, var, val) \
@@ -266,9 +269,9 @@ do { \
})

/*
- * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
- * xchg is expensive due to the implied lock prefix. The processor
- * cannot prefetch cachelines if xchg is used.
+ * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
+ * XCHG is expensive due to the implied LOCK prefix. The processor
+ * cannot prefetch cachelines if XCHG is used.
*/
#define this_percpu_xchg_op(_var, _nval) \
({ \
@@ -278,8 +281,8 @@ do { \
})
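
For illustration only (not part of this patch): this_percpu_xchg_op() above is what backs this_cpu_xchg() here. A hypothetical usage sketch (kernel context assumed, names invented):

/*
 * Hypothetical sketch: swap in a new value and return the old one. This
 * expands to a read plus a non-LOCKed CMPXCHG retry loop on this CPU's copy,
 * avoiding the implicit LOCK prefix of a real XCHG-to-memory instruction.
 */
DEFINE_PER_CPU(unsigned int, demo_state);

static unsigned int demo_set_state(unsigned int new_state)
{
	return this_cpu_xchg(demo_state, new_state);
}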

/*
- * cmpxchg has no such implied lock semantics as a result it is much
- * more efficient for cpu local operations.
+ * CMPXCHG has no such implied lock semantics; as a result it is much
+ * more efficient for CPU-local operations.
*/
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
({ \
@@ -314,6 +317,7 @@ do { \
})
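
For illustration only (not part of this patch): the CPU-local cmpxchg machinery above backs operations such as this_cpu_try_cmpxchg(). A hypothetical usage sketch (kernel context assumed, names invented):

/*
 * Hypothetical sketch: claim a per-CPU slot with a non-LOCKed CMPXCHG;
 * this is safe because only this CPU ever operates on its own instance.
 */
DEFINE_PER_CPU(unsigned long, demo_owner);

static bool demo_claim(unsigned long token)
{
	unsigned long expected = 0;

	return this_cpu_try_cmpxchg(demo_owner, &expected, token);
}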

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
+
#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \
({ \
union { \
@@ -374,7 +378,8 @@ do { \

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
- #endif
+
+ #endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval);
@@ -443,7 +448,8 @@ do { \

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
- #endif
+
+ #endif /* CONFIG_X86_64 */

#define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp)
@@ -510,8 +516,8 @@ do { \
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
- * Per cpu atomic 64 bit operations are only available under 64 bit.
- * 32 bit must fall back to generic operations.
+ * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
+ * 32-bit kernels must fall back to generic operations.
*/
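
For illustration only (not part of this patch): the 8-byte operations below exist only under CONFIG_X86_64, but a hypothetical 64-bit per-CPU counter still works on 32-bit kernels through the generic fallbacks (kernel context assumed, names invented):

/*
 * Hypothetical sketch: on x86-64 this becomes a single addq to %gs-based
 * memory; a 32-bit kernel falls back to the slower generic implementation.
 */
DEFINE_PER_CPU(u64, demo_bytes);

static void demo_account(u64 len)
{
	this_cpu_add(demo_bytes, len);
}
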
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)
@@ -539,20 +545,23 @@ do { \
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp)
- #else
- /* There is no generic 64 bit read stable operation for 32 bit targets. */
+
+ #else /* !CONFIG_X86_64: */
+
+ /* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })

#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)
- #endif
+
+ #endif /* CONFIG_X86_64 */

#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)

/*
- * this_cpu_read() makes gcc load the percpu variable every time it is
- * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read() makes the compiler load the per-CPU variable every time
+ * it is accessed while this_cpu_read_stable() allows the value to be cached.
* this_cpu_read_stable() is more efficient and can be used if its value
- * is guaranteed to be valid across cpus. The current users include
+ * is guaranteed to be valid across CPUs. The current users include
* pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
* actually per-thread variables implemented as per-CPU variables and
* thus stable for the duration of the respective task.
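
For illustration only (not part of this patch): the pcpu_hot.current_task user mentioned above looks roughly like the sketch below; the compiler may cache the result because the value only changes together with the running task (kernel context assumed, helper name invented):

/* Simplified sketch of a current-task accessor built on this machinery. */
static __always_inline struct task_struct *demo_get_current(void)
{
	return this_cpu_read_stable(pcpu_hot.current_task);
}
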
@@ -626,12 +635,12 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
- #define early_per_cpu(_name, _cpu) \
+ #define early_per_cpu(_name, _cpu) \
*(early_per_cpu_ptr(_name) ? \
&early_per_cpu_ptr(_name)[_cpu] : \
&per_cpu(_name, _cpu))
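
For illustration only (not part of this patch): a hypothetical "early" per-CPU variable that must be readable both before and after the per-CPU areas are set up (kernel context assumed, names invented):

/*
 * Hypothetical sketch: early_per_cpu() reads the static early map until the
 * real per-CPU areas exist, and per_cpu() afterwards.
 */
DEFINE_EARLY_PER_CPU(u32, demo_node, 0);

static u32 demo_node_of(int cpu)
{
	return early_per_cpu(demo_node, cpu);
}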

- #else /* !CONFIG_SMP */
+ #else /* !CONFIG_SMP: */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
DEFINE_PER_CPU(_type, _name) = _initvalue

@@ -651,6 +660,6 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

- #endif /* !CONFIG_SMP */
+ #endif /* CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */