static inline void __lse_atomic_##op(int i, atomic_t *v) \
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" " #asm_op " %w[i], %[v]\n" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v)); \
@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" " #asm_op #mb " %w[i], %w[i], %[v]" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
	u32 tmp; \
\
	asm volatile( \
+	__LSE_PREAMBLE \
	" ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
	" add %w[i], %w[i], %w[tmp]" \
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
static inline void __lse_atomic_and(int i, atomic_t *v)
{
	asm volatile(
+	__LSE_PREAMBLE
	" mvn %w[i], %w[i]\n"
	" stclr %w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" mvn %w[i], %w[i]\n" \
	" ldclr" #mb " %w[i], %w[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
static inline void __lse_atomic_sub(int i, atomic_t *v)
{
	asm volatile(
+	__LSE_PREAMBLE
	" neg %w[i], %w[i]\n"
	" stadd %w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
	u32 tmp; \
\
	asm volatile( \
+	__LSE_PREAMBLE \
	" neg %w[i], %w[i]\n" \
	" ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
	" add %w[i], %w[i], %w[tmp]" \
@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" neg %w[i], %w[i]\n" \
	" ldadd" #mb " %w[i], %w[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" " #asm_op " %[i], %[v]\n" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v)); \
@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" " #asm_op #mb " %[i], %[i], %[v]" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
	unsigned long tmp; \
\
	asm volatile( \
+	__LSE_PREAMBLE \
	" ldadd" #mb " %[i], %x[tmp], %[v]\n" \
	" add %[i], %[i], %x[tmp]" \
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(
+	__LSE_PREAMBLE
	" mvn %[i], %[i]\n"
	" stclr %[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" mvn %[i], %[i]\n" \
	" ldclr" #mb " %[i], %[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(
+	__LSE_PREAMBLE
	" neg %[i], %[i]\n"
	" stadd %[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
	unsigned long tmp; \
\
	asm volatile( \
+	__LSE_PREAMBLE \
	" neg %[i], %[i]\n" \
	" ldadd" #mb " %[i], %x[tmp], %[v]\n" \
	" add %[i], %[i], %x[tmp]" \
@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
+	__LSE_PREAMBLE \
	" neg %[i], %[i]\n" \
	" ldadd" #mb " %[i], %[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
	unsigned long tmp;

	asm volatile(
+	__LSE_PREAMBLE
	"1: ldr %x[tmp], %[v]\n"
	" subs %[ret], %x[tmp], #1\n"
	" b.lt 2f\n"
@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
	unsigned long tmp; \
\
	asm volatile( \
+	__LSE_PREAMBLE \
	" mov %" #w "[tmp], %" #w "[old]\n" \
	" cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
	" mov %" #w "[ret], %" #w "[tmp]" \
@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
	register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
	asm volatile( \
+	__LSE_PREAMBLE \
	" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
	" eor %[old1], %[old1], %[oldval1]\n" \
	" eor %[old2], %[old2], %[oldval2]\n" \