Skip to content

Commit 023f329

Browse files
ubizjak authored and Ingo Molnar committed
x86/locking: Remove semicolon from "lock" prefix
Minimum version of binutils required to compile the kernel is 2.25. This version correctly handles the "lock" prefix, so it is possible to remove the semicolon, which was used to support ancient versions of GNU as. Due to the semicolon, the compiler considers "lock; insn" as two separate instructions. Removing the semicolon makes asm length calculations more accurate, consequently making scheduling and inlining decisions of the compiler more accurate. Removing the semicolon also enables assembler checks involving lock prefix. Trying to assemble e.g. "lock andl %eax, %ebx" results in: Error: expecting lockable instruction after `lock' Signed-off-by: Uros Bizjak <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Acked-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 337369f commit 023f329

File tree

6 files changed

+16
-16
lines changed

6 files changed

+16
-16
lines changed

arch/x86/include/asm/alternative.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@
4848
".popsection\n" \
4949
"671:"
5050

51-
#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
51+
#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock "
5252

5353
#else /* ! CONFIG_SMP */
5454
#define LOCK_PREFIX_HERE ""

arch/x86/include/asm/barrier.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@
1212
*/
1313

1414
#ifdef CONFIG_X86_32
15-
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
15+
#define mb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "mfence", \
1616
X86_FEATURE_XMM2) ::: "memory", "cc")
17-
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
17+
#define rmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "lfence", \
1818
X86_FEATURE_XMM2) ::: "memory", "cc")
19-
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
19+
#define wmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "sfence", \
2020
X86_FEATURE_XMM2) ::: "memory", "cc")
2121
#else
2222
#define __mb() asm volatile("mfence":::"memory")
@@ -50,7 +50,7 @@
5050
#define __dma_rmb() barrier()
5151
#define __dma_wmb() barrier()
5252

53-
#define __smp_mb() asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
53+
#define __smp_mb() asm volatile("lock addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
5454

5555
#define __smp_rmb() dma_rmb()
5656
#define __smp_wmb() barrier()

arch/x86/include/asm/cmpxchg.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,7 @@ extern void __add_wrong_size(void)
134134
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
135135

136136
#define __sync_cmpxchg(ptr, old, new, size) \
137-
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
137+
__raw_cmpxchg((ptr), (old), (new), (size), "lock ")
138138

139139
#define __cmpxchg_local(ptr, old, new, size) \
140140
__raw_cmpxchg((ptr), (old), (new), (size), "")
@@ -222,7 +222,7 @@ extern void __add_wrong_size(void)
222222
__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
223223

224224
#define __sync_try_cmpxchg(ptr, pold, new, size) \
225-
__raw_try_cmpxchg((ptr), (pold), (new), (size), "lock; ")
225+
__raw_try_cmpxchg((ptr), (pold), (new), (size), "lock ")
226226

227227
#define __try_cmpxchg_local(ptr, pold, new, size) \
228228
__raw_try_cmpxchg((ptr), (pold), (new), (size), "")

arch/x86/include/asm/cmpxchg_32.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
105105

106106
static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
107107
{
108-
return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
108+
return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock ");
109109
}
110110
#define arch_cmpxchg64 arch_cmpxchg64
111111

@@ -140,7 +140,7 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
140140

141141
static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
142142
{
143-
return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
143+
return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock ");
144144
}
145145
#define arch_try_cmpxchg64 arch_try_cmpxchg64
146146

arch/x86/include/asm/edac.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ static inline void edac_atomic_scrub(void *va, u32 size)
1313
* are interrupt, DMA and SMP safe.
1414
*/
1515
for (i = 0; i < size / 4; i++, virt_addr++)
16-
asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
16+
asm volatile("lock addl $0, %0"::"m" (*virt_addr));
1717
}
1818

1919
#endif /* _ASM_X86_EDAC_H */

arch/x86/include/asm/sync_bitops.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
*/
3232
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
3333
{
34-
asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
34+
asm volatile("lock " __ASM_SIZE(bts) " %1,%0"
3535
: "+m" (ADDR)
3636
: "Ir" (nr)
3737
: "memory");
@@ -49,7 +49,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
4949
*/
5050
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
5151
{
52-
asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
52+
asm volatile("lock " __ASM_SIZE(btr) " %1,%0"
5353
: "+m" (ADDR)
5454
: "Ir" (nr)
5555
: "memory");
@@ -66,7 +66,7 @@ static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
6666
*/
6767
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
6868
{
69-
asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
69+
asm volatile("lock " __ASM_SIZE(btc) " %1,%0"
7070
: "+m" (ADDR)
7171
: "Ir" (nr)
7272
: "memory");
@@ -82,7 +82,7 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
8282
*/
8383
static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
8484
{
85-
return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
85+
return GEN_BINARY_RMWcc("lock " __ASM_SIZE(bts), *addr, c, "Ir", nr);
8686
}
8787

8888
/**
@@ -95,7 +95,7 @@ static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
9595
*/
9696
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
9797
{
98-
return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
98+
return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btr), *addr, c, "Ir", nr);
9999
}
100100

101101
/**
@@ -108,7 +108,7 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
108108
*/
109109
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
110110
{
111-
return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
111+
return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btc), *addr, c, "Ir", nr);
112112
}
113113

114114
#define sync_test_bit(nr, addr) test_bit(nr, addr)

0 commit comments

Comments (0)