
Commit 8e6082e

mrutland-arm authored and ctmarinas committed
arm64: atomics: format whitespace consistently
The code for the atomic ops is formatted inconsistently, and while this
is not a functional problem it is rather distracting when working on
them.

Some of the ops have consistent indentation, e.g.

| #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
| static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
| {									\
|	u32 tmp;							\
|									\
|	asm volatile(							\
|	__LSE_PREAMBLE							\
|	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
|	"	add	%w[i], %w[i], %w[tmp]"				\
|	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
|	: "r" (v)							\
|	: cl);								\
|									\
|	return i;							\
| }

While others have negative indentation for some lines, and/or have
misaligned trailing backslashes, e.g.

| static inline void __lse_atomic_##op(int i, atomic_t *v)			\
| {									\
|	asm volatile(							\
|	__LSE_PREAMBLE							\
| "	" #asm_op "	%w[i], %[v]\n"					\
|	: [i] "+r" (i), [v] "+Q" (v->counter)				\
|	: "r" (v));							\
| }

This patch makes the indentation consistent and also aligns the
trailing backslashes. This makes the code easier to read for those
(like myself) who are easily distracted by these inconsistencies.

This is intended as a cleanup. There should be no functional change as
a result of this patch.

Signed-off-by: Mark Rutland <[email protected]>
Cc: Boqun Feng <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Will Deacon <[email protected]>
Acked-by: Will Deacon <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Catalin Marinas <[email protected]>
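For readers not used to these stringified-asm macros, here is a rough, hand-expanded sketch of what the first example above generates. The instantiation parameters (name = _acquire, mb = a, cl = "memory") are an assumption made purely for illustration and are not part of this commit; __LSE_PREAMBLE and atomic_t come from the surrounding kernel headers.

/*
 * Hand-expanded sketch of ATOMIC_OP_ADD_RETURN(_acquire, a, "memory")
 * from the example in the commit message. Illustration only; the
 * instantiation parameters are assumed, not taken from this patch.
 */
static inline int __lse_atomic_add_return_acquire(int i, atomic_t *v)
{
	u32 tmp;

	asm volatile(
	__LSE_PREAMBLE
	"	ldadda	%w[i], %w[tmp], %[v]\n"	/* tmp = old *v; *v += i, atomically */
	"	add	%w[i], %w[i], %w[tmp]"	/* i = i + old value = new value */
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	: "r" (v)
	: "memory");

	return i;
}

Note that only the tabs inside the string literals end up in the emitted assembly; the C-level indentation this patch adjusts sits outside the strings, which is why the change can have no functional effect.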
1 parent d58071a · commit 8e6082e

2 files changed (+50, -50 lines)

arch/arm64/include/asm/atomic_ll_sc.h

Lines changed: 43 additions & 43 deletions
@@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v)	\
 									\
 	asm volatile("// atomic_" #op "\n"				\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b\n")						\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxr	%w0, %2\n"					\
+	"	" #asm_op "	%w0, %w0, %w3\n"			\
+	"	stxr	%w1, %w0, %2\n"					\
+	"	cbnz	%w1, 1b\n")					\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i));				\
 }
@@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)	\
 									\
 	asm volatile("// atomic_" #op "_return" #name "\n"		\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ld" #acq "xr	%w0, %2\n"					\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	st" #rel "xr	%w1, %w0, %2\n"					\
-"	cbnz	%w1, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ld" #acq "xr	%w0, %2\n"				\
+	"	" #asm_op "	%w0, %w0, %w3\n"			\
+	"	st" #rel "xr	%w1, %w0, %2\n"				\
+	"	cbnz	%w1, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)	\
 									\
 	asm volatile("// atomic_fetch_" #op #name "\n"			\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %3\n"					\
-"1:	ld" #acq "xr	%w0, %3\n"					\
-"	" #asm_op "	%w1, %w0, %w4\n"				\
-"	st" #rel "xr	%w2, %w1, %3\n"					\
-"	cbnz	%w2, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %3\n"				\
+	"1:	ld" #acq "xr	%w0, %3\n"				\
+	"	" #asm_op "	%w1, %w0, %w4\n"			\
+	"	st" #rel "xr	%w2, %w1, %3\n"				\
+	"	cbnz	%w2, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v)	\
 									\
 	asm volatile("// atomic64_" #op "\n"				\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b")						\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxr	%0, %2\n"					\
+	"	" #asm_op "	%0, %0, %3\n"				\
+	"	stxr	%w1, %0, %2\n"					\
+	"	cbnz	%w1, 1b")					\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i));				\
 }
@@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)	\
 									\
 	asm volatile("// atomic64_" #op "_return" #name "\n"		\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ld" #acq "xr	%0, %2\n"					\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	st" #rel "xr	%w1, %0, %2\n"					\
-"	cbnz	%w1, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ld" #acq "xr	%0, %2\n"				\
+	"	" #asm_op "	%0, %0, %3\n"				\
+	"	st" #rel "xr	%w1, %0, %2\n"				\
+	"	cbnz	%w1, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -176,19 +176,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)	\
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
 static inline long							\
-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)		\
 {									\
 	s64 result, val;						\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_fetch_" #op #name "\n"		\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %3\n"					\
-"1:	ld" #acq "xr	%0, %3\n"					\
-"	" #asm_op "	%1, %0, %4\n"					\
-"	st" #rel "xr	%w2, %1, %3\n"					\
-"	cbnz	%w2, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %3\n"				\
+	"1:	ld" #acq "xr	%0, %3\n"				\
+	"	" #asm_op "	%1, %0, %4\n"				\
+	"	st" #rel "xr	%w2, %1, %3\n"				\
+	"	cbnz	%w2, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 
 	asm volatile("// atomic64_dec_if_positive\n"
 	__LL_SC_FALLBACK(
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.lt	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:")
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	subs	%0, %0, #1\n"
+	"	b.lt	2f\n"
+	"	stlxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"	dmb	ish\n"
+	"2:")
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
 	: "cc", "memory");

arch/arm64/include/asm/atomic_lse.h

Lines changed: 7 additions & 7 deletions
@@ -11,11 +11,11 @@
 #define __ASM_ATOMIC_LSE_H
 
 #define ATOMIC_OP(op, asm_op)						\
-static inline void __lse_atomic_##op(int i, atomic_t *v)			\
+static inline void __lse_atomic_##op(int i, atomic_t *v)		\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-"	" #asm_op "	%w[i], %[v]\n"					\
+	"	" #asm_op "	%w[i], %[v]\n"				\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v));							\
 }
@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-"	" #asm_op #mb "	%w[i], %w[i], %[v]"				\
+	"	" #asm_op #mb "	%w[i], %w[i], %[v]"			\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v)							\
 	: cl);								\
@@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
 	"	add	%w[i], %w[i], %w[tmp]"				\
 	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)	\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-"	" #asm_op "	%[i], %[v]\n"					\
+	"	" #asm_op "	%[i], %[v]\n"				\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v));							\
 }
@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-"	" #asm_op #mb "	%[i], %[i], %[v]"				\
+	"	" #asm_op #mb "	%[i], %[i], %[v]"			\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v)							\
 	: cl);								\
@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)			\
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
 {									\
 	unsigned long tmp;						\
 									\
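By contrast, the LSE forms in this file need no retry loop: each operation maps to a single Large System Extensions atomic instruction. Below is a rough hand-expansion of the ATOMIC64_OP() macro from the -168 hunk above, assuming an (add, stadd) instantiation; that parameter pair is an illustration-only assumption, not something this patch introduces.

/*
 * Hand-expanded sketch of ATOMIC64_OP(add, stadd); the (add, stadd)
 * parameters are assumed for illustration. The whole atomic add is
 * the single STADD instruction, with no LL/SC-style retry loop.
 */
static inline void __lse_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	stadd	%[i], %[v]\n"
	: [i] "+r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}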
