
Commit e0d5896

samitolvanen authored and willdeacon committed
arm64: lse: fix LSE atomics with LLVM's integrated assembler
Unlike gcc, clang considers each inline assembly block to be independent and
therefore, when using the integrated assembler for inline assembly, any
preambles that enable features must be repeated in each block.

This change defines __LSE_PREAMBLE and adds it to each inline assembly block
that has LSE instructions, which allows them to also be compiled with clang's
integrated assembler.

Link: ClangBuiltLinux#671
Signed-off-by: Sami Tolvanen <[email protected]>
Tested-by: Andrew Murray <[email protected]>
Tested-by: Kees Cook <[email protected]>
Reviewed-by: Andrew Murray <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Reviewed-by: Nick Desaulniers <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
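For illustration, a minimal stand-alone sketch of the pattern this patch applies.
The wrapper function example_lse_stadd and its parameters are made up for this
example; the preamble string is the one the patch adds to asm/lse.h below.

#define __LSE_PREAMBLE ".arch armv8-a+lse\n"

/*
 * Illustrative only: clang's integrated assembler processes each asm block
 * independently, so the directive enabling LSE has to appear in every block
 * that uses an LSE instruction such as stadd.
 */
static inline void example_lse_stadd(int i, int *counter)
{
        asm volatile(
        __LSE_PREAMBLE
        "       stadd   %w[i], %[v]\n"
        : [v] "+Q" (*counter)
        : [i] "r" (i));
}

With GNU as, a single top-level __asm__(".arch_extension lse") was enough to
enable the instructions for the whole translation unit, which is what the
second hunk of asm/lse.h removes.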
1 parent 46cf053 commit e0d5896

File tree

2 files changed (+22, -3 lines)


arch/arm64/include/asm/atomic_lse.h

Lines changed: 19 additions & 0 deletions
@@ -14,6 +14,7 @@
 static inline void __lse_atomic_##op(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " " #asm_op " %w[i], %[v]\n" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v)); \
@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
 static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " " #asm_op #mb " %w[i], %w[i], %[v]" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v) \
@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
         u32 tmp; \
         \
         asm volatile( \
+        __LSE_PREAMBLE \
         " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
         " add %w[i], %w[i], %w[tmp]" \
         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
 static inline void __lse_atomic_and(int i, atomic_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
         " mvn %w[i], %w[i]\n"
         " stclr %w[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " mvn %w[i], %w[i]\n" \
         " ldclr" #mb " %w[i], %w[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
 static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
         " neg %w[i], %w[i]\n"
         " stadd %w[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
         u32 tmp; \
         \
         asm volatile( \
+        __LSE_PREAMBLE \
         " neg %w[i], %w[i]\n" \
         " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
         " add %w[i], %w[i], %w[tmp]" \
@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
 static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " neg %w[i], %w[i]\n" \
         " ldadd" #mb " %w[i], %w[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
 static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " " #asm_op " %[i], %[v]\n" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v)); \
@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
 static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " " #asm_op #mb " %[i], %[i], %[v]" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v) \
@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
         unsigned long tmp; \
         \
         asm volatile( \
+        __LSE_PREAMBLE \
         " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
         " add %[i], %[i], %x[tmp]" \
         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
         " mvn %[i], %[i]\n"
         " stclr %[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " mvn %[i], %[i]\n" \
         " ldclr" #mb " %[i], %[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
 static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
         " neg %[i], %[i]\n"
         " stadd %[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
         unsigned long tmp; \
         \
         asm volatile( \
+        __LSE_PREAMBLE \
         " neg %[i], %[i]\n" \
         " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
         " add %[i], %[i], %x[tmp]" \
@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
 static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
         " neg %[i], %[i]\n" \
         " ldadd" #mb " %[i], %[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
         unsigned long tmp;

         asm volatile(
+        __LSE_PREAMBLE
         "1: ldr %x[tmp], %[v]\n"
         " subs %[ret], %x[tmp], #1\n"
         " b.lt 2f\n"
@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
         unsigned long tmp; \
         \
         asm volatile( \
+        __LSE_PREAMBLE \
         " mov %" #w "[tmp], %" #w "[old]\n" \
         " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
         " mov %" #w "[ret], %" #w "[tmp]" \
@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
         register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
         \
         asm volatile( \
+        __LSE_PREAMBLE \
         " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
         " eor %[old1], %[old1], %[oldval1]\n" \
         " eor %[old2], %[old2], %[oldval2]\n" \

arch/arm64/include/asm/lse.h

Lines changed: 3 additions & 3 deletions
@@ -6,6 +6,8 @@

 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)

+#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
+
 #include <linux/compiler_types.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>
@@ -14,8 +16,6 @@
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>

-__asm__(".arch_extension lse");
-
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;

@@ -34,7 +34,7 @@ static inline bool system_uses_lse_atomics(void)

 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
-        ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+        ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)

 #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */

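Conceptually, with the placeholder strings "llsc-seq" and "lse-seq" standing in
for real instruction sequences, the updated macro now expands along these lines
(adjacent string literals concatenate in C):

ARM64_LSE_ATOMIC_INSN("llsc-seq", "lse-seq")
        /* becomes */
ALTERNATIVE("llsc-seq", ".arch armv8-a+lse\n" "lse-seq", ARM64_HAS_LSE_ATOMICS)

so every runtime-patched LSE alternative is likewise assembled with the
preamble in front of it.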

0 commit comments
