
Commit 61d73e4

Author and committer: Ingo Molnar
x86/percpu: Clean up <asm/percpu.h> a bit
- Fix misc typos

- There are currently four variants of the same spelling:

      'per-CPU', 'per CPU', 'percpu' and 'per-cpu'

  Standardize on 'per-CPU' only.

- s/makes gcc load /makes the compiler load

- Instead of:

      #ifdef CONFIG_XXXX
      #define YYYY FOO
      #else
      #define YYYY BAR
      #endif

  use the slightly more readable form of:

      #ifdef CONFIG_XXXX
      # define YYYY FOO
      #else
      # define YYYY BAR
      #endif

- Standardize & expand '#else' and '#endif' comments

- Fix comment style

- Capitalize x86 instruction names in comments

No change in code.

Signed-off-by: Ingo Molnar <[email protected]>
Cc: Uros Bizjak <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: [email protected]
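As context for the "s/makes gcc load /makes the compiler load" item: the comment in question documents this_cpu_read() versus this_cpu_read_stable(). Below is a minimal kernel-side sketch of the two accessors (illustrative only, not part of this commit; demo_var is a hypothetical variable, while the accessors are the existing x86 per-CPU API):

    #include <linux/percpu.h>

    DEFINE_PER_CPU(unsigned long, demo_var);	/* hypothetical per-CPU variable */

    static unsigned long demo_reads(void)
    {
    	/* Forces a fresh load from the per-CPU area on every access: */
    	unsigned long fresh = this_cpu_read(demo_var);

    	/*
    	 * May be cached by the compiler across accesses; only valid for
    	 * values that stay stable for the current task, such as
    	 * pcpu_hot.current_task:
    	 */
    	unsigned long cached = this_cpu_read_stable(demo_var);

    	return fresh + cached;
    }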
1 parent 47c9dbd commit 61d73e4

File tree: 1 file changed (+50, -41 lines)


arch/x86/include/asm/percpu.h

Lines changed: 50 additions & 41 deletions
@@ -3,30 +3,30 @@
 #define _ASM_X86_PERCPU_H
 
 #ifdef CONFIG_X86_64
-#define __percpu_seg gs
-#define __percpu_rel (%rip)
+# define __percpu_seg gs
+# define __percpu_rel (%rip)
 #else
-#define __percpu_seg fs
-#define __percpu_rel
+# define __percpu_seg fs
+# define __percpu_rel
 #endif
 
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_SMP
-#define __percpu %__percpu_seg:
+# define __percpu %__percpu_seg:
 #else
-#define __percpu
+# define __percpu
 #endif
 
 #define PER_CPU_VAR(var) __percpu(var)__percpu_rel
 
 #ifdef CONFIG_X86_64_SMP
-#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+# define INIT_PER_CPU_VAR(var) init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var) var
+# define INIT_PER_CPU_VAR(var) var
 #endif
 
-#else /* ...!ASSEMBLY */
+#else /* !__ASSEMBLY__: */
 
 #include <linux/build_bug.h>
 #include <linux/stringify.h>
@@ -37,19 +37,19 @@
 #ifdef CONFIG_CC_HAS_NAMED_AS
 
 #ifdef __CHECKER__
-#define __seg_gs __attribute__((address_space(__seg_gs)))
-#define __seg_fs __attribute__((address_space(__seg_fs)))
+# define __seg_gs __attribute__((address_space(__seg_gs)))
+# define __seg_fs __attribute__((address_space(__seg_fs)))
 #endif
 
 #ifdef CONFIG_X86_64
-#define __percpu_seg_override __seg_gs
+# define __percpu_seg_override __seg_gs
 #else
-#define __percpu_seg_override __seg_fs
+# define __percpu_seg_override __seg_fs
 #endif
 
 #define __percpu_prefix ""
 
-#else /* CONFIG_CC_HAS_NAMED_AS */
+#else /* !CONFIG_CC_HAS_NAMED_AS: */
 
 #define __percpu_seg_override
 #define __percpu_prefix "%%"__stringify(__percpu_seg)":"
@@ -80,7 +80,8 @@
 
 #define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel
 
-#else /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
+
 #define __percpu_seg_override
 #define __percpu_prefix ""
 #define __force_percpu_prefix ""
@@ -96,7 +97,7 @@
 #define __force_percpu_arg(x) __force_percpu_prefix "%" #x
 
 /*
- * Initialized pointers to per-cpu variables needed for the boot
+ * Initialized pointers to per-CPU variables needed for the boot
  * processor need to use these macros to get the proper address
  * offset from __per_cpu_load on SMP.
  *
@@ -106,13 +107,15 @@
 	extern typeof(var) init_per_cpu_var(var)
 
 #ifdef CONFIG_X86_64_SMP
-#define init_per_cpu_var(var) init_per_cpu__##var
+# define init_per_cpu_var(var) init_per_cpu__##var
 #else
-#define init_per_cpu_var(var) var
+# define init_per_cpu_var(var) var
 #endif
 
-/* For arch-specific code, we can use direct single-insn ops (they
- * don't give an lvalue though). */
+/*
+ * For arch-specific code, we can use direct single-insn ops (they
+ * don't give an lvalue though).
+ */
 
 #define __pcpu_type_1 u8
 #define __pcpu_type_2 u16
@@ -158,7 +161,7 @@ do { \
 
 #define __raw_cpu_read_const(pcp) __raw_cpu_read(, , pcp)
 
-#else /* CONFIG_USE_X86_SEG_SUPPORT */
+#else /* !CONFIG_USE_X86_SEG_SUPPORT: */
 
 #define __raw_cpu_read(size, qual, _var) \
 ({ \
@@ -183,7 +186,7 @@ do { \
 } while (0)
 
 /*
- * The generic per-cpu infrastrucutre is not suitable for
+ * The generic per-CPU infrastrucutre is not suitable for
  * reading const-qualified variables.
  */
 #define __raw_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })
@@ -219,7 +222,7 @@ do { \
 } while (0)
 
 /*
- * Generate a percpu add to memory instruction and optimize code
+ * Generate a per-CPU add to memory instruction and optimize code
  * if one is added or subtracted.
  */
 #define percpu_add_op(size, qual, var, val) \
@@ -266,9 +269,9 @@ do { \
 })
 
 /*
- * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
- * xchg is expensive due to the implied lock prefix. The processor
- * cannot prefetch cachelines if xchg is used.
+ * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
+ * XCHG is expensive due to the implied LOCK prefix. The processor
+ * cannot prefetch cachelines if XCHG is used.
  */
 #define this_percpu_xchg_op(_var, _nval) \
 ({ \
@@ -278,8 +281,8 @@ do { \
 })
 
 /*
- * cmpxchg has no such implied lock semantics as a result it is much
- * more efficient for cpu local operations.
+ * CMPXCHG has no such implied lock semantics as a result it is much
+ * more efficient for CPU-local operations.
  */
 #define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
 ({ \
@@ -314,6 +317,7 @@ do { \
 })
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
+
 #define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \
 ({ \
 	union { \
@@ -374,7 +378,8 @@ do { \
 
 #define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
 #define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
-#endif
+
+#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */
 
 #ifdef CONFIG_X86_64
 #define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval);
@@ -443,7 +448,8 @@ do { \
 
 #define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
 #define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
-#endif
+
+#endif /* CONFIG_X86_64 */
 
 #define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp)
 #define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp)
@@ -510,8 +516,8 @@ do { \
 #define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)
 
 /*
- * Per cpu atomic 64 bit operations are only available under 64 bit.
- * 32 bit must fall back to generic operations.
+ * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
+ * 32-bit kernels must fall back to generic operations.
  */
 #ifdef CONFIG_X86_64
 #define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)
@@ -539,20 +545,23 @@ do { \
 #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
 
 #define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp)
-#else
-/* There is no generic 64 bit read stable operation for 32 bit targets. */
+
+#else /* !CONFIG_X86_64: */
+
+/* There is no generic 64-bit read stable operation for 32-bit targets. */
 #define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })
 
 #define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)
-#endif
+
+#endif /* CONFIG_X86_64 */
 
 #define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)
 
 /*
- * this_cpu_read() makes gcc load the percpu variable every time it is
- * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read() makes the compiler load the per-CPU variable every time
+ * it is accessed while this_cpu_read_stable() allows the value to be cached.
  * this_cpu_read_stable() is more efficient and can be used if its value
- * is guaranteed to be valid across cpus. The current users include
+ * is guaranteed to be valid across CPUs. The current users include
  * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
  * actually per-thread variables implemented as per-CPU variables and
  * thus stable for the duration of the respective task.
@@ -626,12 +635,12 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 
 #define early_per_cpu_ptr(_name) (_name##_early_ptr)
 #define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
-#define early_per_cpu(_name, _cpu) \
+#define early_per_cpu(_name, _cpu) \
 	*(early_per_cpu_ptr(_name) ? \
 		&early_per_cpu_ptr(_name)[_cpu] : \
 		&per_cpu(_name, _cpu))
 
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
 	DEFINE_PER_CPU(_type, _name) = _initvalue
 
@@ -651,6 +660,6 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 #define early_per_cpu_ptr(_name) NULL
 /* no early_per_cpu_map() */
 
-#endif /* !CONFIG_SMP */
+#endif /* CONFIG_SMP */
 
 #endif /* _ASM_X86_PERCPU_H */
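As a usage note for the XCHG/CMPXCHG comments touched above, here is a minimal sketch of the per-CPU operations this header backs (illustrative only, not part of the commit; demo_state is a hypothetical variable, the this_cpu_*() operations are the existing kernel API):

    #include <linux/percpu.h>

    DEFINE_PER_CPU(int, demo_state);	/* hypothetical per-CPU variable */

    static int demo_update(void)
    {
    	/* Compiles to a single per-CPU add/inc instruction, no LOCK prefix needed: */
    	this_cpu_inc(demo_state);

    	/*
    	 * Per the comment above, x86 implements this_cpu_xchg() with
    	 * CMPXCHG (no implied LOCK prefix) rather than XCHG:
    	 */
    	return this_cpu_xchg(demo_state, 0);
    }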
