
Commit 7016cc5

ubizjak authored and Ingo Molnar committed
locking/atomic/x86: Modernize x86_32 arch_{,try_}_cmpxchg64{,_local}()
Commit: b23e139 ("arch: Introduce arch_{,try_}_cmpxchg128{,_local}()") introduced
arch_{,try_}_cmpxchg128{,_local}() for x86_64 targets.

Modernize the existing x86_32 arch_{,try_}_cmpxchg64{,_local}() definitions to
follow the same structure as the definitions introduced by the above commit.

No functional changes intended.

Signed-off-by: Uros Bizjak <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 929ad06 commit 7016cc5
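For reference, the API being restructured below has try_cmpxchg semantics: arch_try_cmpxchg64() returns a bool and, on failure, writes the value it actually observed back through the "old" pointer, which is what the "if (unlikely(!ret)) *(_oldp) = o.full;" line in the new __arch_try_cmpxchg64() implements. The following sketch is illustrative only and is not part of this commit; it models the same semantics in userspace with the GCC/Clang __atomic_compare_exchange_n() builtin, and the helper name try_cmpxchg64_demo() is invented for the example.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for arch_try_cmpxchg64(): compare *ptr against
     * *old; on success store new, on failure copy the observed value back
     * into *old and return false. */
    static bool try_cmpxchg64_demo(uint64_t *ptr, uint64_t *old, uint64_t new)
    {
            return __atomic_compare_exchange_n(ptr, old, new, false,
                                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            uint64_t counter = 0;
            uint64_t old = counter;

            /* Typical caller loop: on failure 'old' already holds the fresh
             * value, so the location is not re-read separately. */
            while (!try_cmpxchg64_demo(&counter, &old, old + 1))
                    ;

            printf("counter = %llu\n", (unsigned long long)counter);
            return 0;
    }

On 32-bit x86 a compiler targeting a CX8-capable CPU will typically lower the 8-byte builtin to the same lock-prefixed cmpxchg8b instruction that the macros in this file hand-code.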

File tree: 1 file changed, +100 −79 lines


arch/x86/include/asm/cmpxchg_32.h

Lines changed: 100 additions & 79 deletions
@@ -3,103 +3,124 @@
 #define _ASM_X86_CMPXCHG_32_H
 
 /*
- * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
+ * Note: if you use __cmpxchg64(), or their variants,
  * you need to test for the feature in boot_cpu_data.
  */
 
-#ifdef CONFIG_X86_CMPXCHG64
-#define arch_cmpxchg64(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
-					 (unsigned long long)(n)))
-#define arch_cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
-					       (unsigned long long)(n)))
-#define arch_try_cmpxchg64(ptr, po, n)					\
-	__try_cmpxchg64((ptr), (unsigned long long *)(po),		\
-			(unsigned long long)(n))
-#endif
+union __u64_halves {
+	u64 full;
+	struct {
+		u32 low, high;
+	};
+};
+
+#define __arch_cmpxchg64(_ptr, _old, _new, _lock)			\
+({									\
+	union __u64_halves o = { .full = (_old), },			\
+			   n = { .full = (_new), };			\
+									\
+	asm volatile(_lock "cmpxchg8b %[ptr]"				\
+		     : [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high)			\
+		     : "memory");					\
+									\
+	o.full;								\
+})
 
-static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+
+static __always_inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
-	u64 prev;
-	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
-		     : "=A" (prev),
-		       "+m" (*ptr)
-		     : "b" ((u32)new),
-		       "c" ((u32)(new >> 32)),
-		       "0" (old)
-		     : "memory");
-	return prev;
+	return __arch_cmpxchg64(ptr, old, new, LOCK_PREFIX);
 }
 
-static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 {
-	u64 prev;
-	asm volatile("cmpxchg8b %1"
-		     : "=A" (prev),
-		       "+m" (*ptr)
-		     : "b" ((u32)new),
-		       "c" ((u32)(new >> 32)),
-		       "0" (old)
-		     : "memory");
-	return prev;
+	return __arch_cmpxchg64(ptr, old, new,);
 }
 
-static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
+#define __arch_try_cmpxchg64(_ptr, _oldp, _new, _lock)			\
+({									\
+	union __u64_halves o = { .full = *(_oldp), },			\
+			   n = { .full = (_new), };			\
+	bool ret;							\
+									\
+	asm volatile(_lock "cmpxchg8b %[ptr]"				\
+		     CC_SET(e)						\
+		     : CC_OUT(e) (ret),					\
+		       [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high)			\
+		     : "memory");					\
+									\
+	if (unlikely(!ret))						\
+		*(_oldp) = o.full;					\
+									\
+	likely(ret);							\
+})
+
+static __always_inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
 {
-	bool success;
-	u64 old = *pold;
-	asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
-		     CC_SET(z)
-		     : CC_OUT(z) (success),
-		       [ptr] "+m" (*ptr),
-		       "+A" (old)
-		     : "b" ((u32)new),
-		       "c" ((u32)(new >> 32))
-		     : "memory");
-
-	if (unlikely(!success))
-		*pold = old;
-	return success;
+	return __arch_try_cmpxchg64(ptr, oldp, new, LOCK_PREFIX);
 }
 
-#ifndef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_CMPXCHG64
+
+#define arch_cmpxchg64 __cmpxchg64
+
+#define arch_cmpxchg64_local __cmpxchg64_local
+
+#define arch_try_cmpxchg64 __try_cmpxchg64
+
+#else
+
 /*
  * Building a kernel capable running on 80386 and 80486. It may be necessary
  * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
  */
 
-#define arch_cmpxchg64(ptr, o, n)				\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	__typeof__(*(ptr)) __old = (o);				\
-	__typeof__(*(ptr)) __new = (n);				\
-	alternative_io(LOCK_PREFIX_HERE				\
-			"call cmpxchg8b_emu",			\
-			"lock; cmpxchg8b (%%esi)" ,		\
-		       X86_FEATURE_CX8,				\
-		       "=A" (__ret),				\
-		       "S" ((ptr)), "0" (__old),		\
-		       "b" ((unsigned int)__new),		\
-		       "c" ((unsigned int)(__new>>32))		\
-		       : "memory");				\
-	__ret; })
-
-
-#define arch_cmpxchg64_local(ptr, o, n)				\
-({								\
-	__typeof__(*(ptr)) __ret;				\
-	__typeof__(*(ptr)) __old = (o);				\
-	__typeof__(*(ptr)) __new = (n);				\
-	alternative_io("call cmpxchg8b_emu",			\
-			"cmpxchg8b (%%esi)" ,			\
-		       X86_FEATURE_CX8,				\
-		       "=A" (__ret),				\
-		       "S" ((ptr)), "0" (__old),		\
-		       "b" ((unsigned int)__new),		\
-		       "c" ((unsigned int)(__new>>32))		\
-		       : "memory");				\
-	__ret; })
+#define __arch_cmpxchg64_emu(_ptr, _old, _new)				\
+({									\
+	union __u64_halves o = { .full = (_old), },			\
+			   n = { .full = (_new), };			\
+									\
+	asm volatile(ALTERNATIVE(LOCK_PREFIX_HERE			\
+				 "call cmpxchg8b_emu",			\
+				 "lock; cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+		     : [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high), "S" (_ptr)		\
+		     : "memory");					\
+									\
+	o.full;								\
+})
+
+static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+{
+	return __arch_cmpxchg64_emu(ptr, old, new);
+}
+#define arch_cmpxchg64 arch_cmpxchg64
+
+#define __arch_cmpxchg64_emu_local(_ptr, _old, _new)			\
+({									\
+	union __u64_halves o = { .full = (_old), },			\
+			   n = { .full = (_new), };			\
+									\
+	asm volatile(ALTERNATIVE("call cmpxchg8b_emu",			\
+				 "cmpxchg8b %[ptr]", X86_FEATURE_CX8)	\
+		     : [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high), "S" (_ptr)		\
+		     : "memory");					\
+									\
+	o.full;								\
+})
+
+static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+{
+	return __arch_cmpxchg64_emu_local(ptr, old, new);
+}
+#define arch_cmpxchg64_local arch_cmpxchg64_local
 
 #endif
 
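The key structural element in the new code is the __u64_halves union: on little-endian x86 its anonymous struct overlays the low and high 32-bit halves of the u64, so the asm can bind them directly to the EDX:EAX (expected/returned value) and ECX:EBX (replacement value) register pairs that cmpxchg8b requires. Below is a standalone sketch of that layout, illustrative only and using standard C types instead of the kernel's u32/u64.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace mirror of the kernel's __u64_halves: the anonymous struct
     * aliases the low/high 32-bit halves of the 64-bit value. */
    union u64_halves {
            uint64_t full;
            struct {
                    uint32_t low, high;
            };
    };

    int main(void)
    {
            union u64_halves v = { .full = 0x1122334455667788ULL };

            /* Little-endian layout: .low is the least significant half
             * (bound to EAX/EBX in the kernel asm), .high the most
             * significant half (EDX/ECX). */
            assert(v.low  == (uint32_t)v.full);
            assert(v.high == (uint32_t)(v.full >> 32));

            printf("full=%#llx low=%#x high=%#x\n",
                   (unsigned long long)v.full, v.low, v.high);
            return 0;
    }

Keeping the halves named, rather than open-coding (u32)(new >> 32) style casts as the old code did, is what lets the new macros pass o.low/o.high and n.low/n.high straight into the "+a"/"+d" and "b"/"c" constraints.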
