Commit 9061237

Leonardo Bras authored and palmer-dabbelt committed
riscv/atomic.h : Deduplicate arch_atomic.*
Some functions use mostly the same asm for 32-bit and 64-bit versions. Make a macro that is generic enough and avoid code duplication.

(This did not cause any change in generated asm)

Signed-off-by: Leonardo Bras <[email protected]>
Reviewed-by: Guo Ren <[email protected]>
Reviewed-by: Andrea Parri <[email protected]>
Tested-by: Guo Ren <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 07a0a41 commit 9061237
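As background for the commit message above: the deduplicated macros splice the operand-width suffix ("w" or "d") into the asm template as a string literal, relying on C's compile-time concatenation of adjacent string literals, so "lr." sfx becomes "lr.w" or "lr.d" with no runtime cost. A minimal, self-contained sketch of that mechanism (LR_TEMPLATE is a made-up name for illustration, not part of the patch):

#include <stdio.h>

/* Toy demonstration of the string-pasting trick used by the patch: the
 * width suffix is passed as a string literal and the compiler pastes the
 * adjacent literals into a single template string. */
#define LR_TEMPLATE(sfx) "0: lr." sfx " %[p], %[c]\n"

int main(void)
{
        fputs(LR_TEMPLATE("w"), stdout); /* prints: 0: lr.w %[p], %[c] */
        fputs(LR_TEMPLATE("d"), stdout); /* prints: 0: lr.d %[p], %[c] */
        return 0;
}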

File tree

1 file changed: +76, -88 lines changed


arch/riscv/include/asm/atomic.h

Lines changed: 76 additions & 88 deletions
@@ -196,22 +196,28 @@ ATOMIC_OPS(xor, xor, i)
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN

+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
+({ \
+        __asm__ __volatile__ ( \
+                "0: lr." sfx " %[p], %[c]\n" \
+                " beq %[p], %[u], 1f\n" \
+                " add %[rc], %[p], %[a]\n" \
+                " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+                " bnez %[rc], 0b\n" \
+                " fence rw, rw\n" \
+                "1:\n" \
+                : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+                : [a]"r" (_a), [u]"r" (_u) \
+                : "memory"); \
+})
+
 /* This is required to provide a full barrier on success. */
 static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
         int prev, rc;

-        __asm__ __volatile__ (
-                "0: lr.w %[p], %[c]\n"
-                " beq %[p], %[u], 1f\n"
-                " add %[rc], %[p], %[a]\n"
-                " sc.w.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                : [a]"r" (a), [u]"r" (u)
-                : "memory");
+        _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
         return prev;
 }
 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
@@ -222,77 +228,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
         s64 prev;
         long rc;

-        __asm__ __volatile__ (
-                "0: lr.d %[p], %[c]\n"
-                " beq %[p], %[u], 1f\n"
-                " add %[rc], %[p], %[a]\n"
-                " sc.d.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                : [a]"r" (a), [u]"r" (u)
-                : "memory");
+        _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
         return prev;
 }
 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 #endif

+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
+({ \
+        __asm__ __volatile__ ( \
+                "0: lr." sfx " %[p], %[c]\n" \
+                " bltz %[p], 1f\n" \
+                " addi %[rc], %[p], 1\n" \
+                " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+                " bnez %[rc], 0b\n" \
+                " fence rw, rw\n" \
+                "1:\n" \
+                : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+                : \
+                : "memory"); \
+})
+
 static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
 {
         int prev, rc;

-        __asm__ __volatile__ (
-                "0: lr.w %[p], %[c]\n"
-                " bltz %[p], 1f\n"
-                " addi %[rc], %[p], 1\n"
-                " sc.w.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                :
-                : "memory");
+        _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
         return !(prev < 0);
 }

 #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
+({ \
+        __asm__ __volatile__ ( \
+                "0: lr." sfx " %[p], %[c]\n" \
+                " bgtz %[p], 1f\n" \
+                " addi %[rc], %[p], -1\n" \
+                " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+                " bnez %[rc], 0b\n" \
+                " fence rw, rw\n" \
+                "1:\n" \
+                : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+                : \
+                : "memory"); \
+})
+
 static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
 {
         int prev, rc;

-        __asm__ __volatile__ (
-                "0: lr.w %[p], %[c]\n"
-                " bgtz %[p], 1f\n"
-                " addi %[rc], %[p], -1\n"
-                " sc.w.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                :
-                : "memory");
+        _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
         return !(prev > 0);
 }

 #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
+({ \
+        __asm__ __volatile__ ( \
+                "0: lr." sfx " %[p], %[c]\n" \
+                " addi %[rc], %[p], -1\n" \
+                " bltz %[rc], 1f\n" \
+                " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+                " bnez %[rc], 0b\n" \
+                " fence rw, rw\n" \
+                "1:\n" \
+                : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+                : \
+                : "memory"); \
+})
+
 static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 {
         int prev, rc;

-        __asm__ __volatile__ (
-                "0: lr.w %[p], %[c]\n"
-                " addi %[rc], %[p], -1\n"
-                " bltz %[rc], 1f\n"
-                " sc.w.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                :
-                : "memory");
+        _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
         return prev - 1;
 }

@@ -304,17 +319,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
         s64 prev;
         long rc;

-        __asm__ __volatile__ (
-                "0: lr.d %[p], %[c]\n"
-                " bltz %[p], 1f\n"
-                " addi %[rc], %[p], 1\n"
-                " sc.d.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                :
-                : "memory");
+        _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
+
         return !(prev < 0);
 }

@@ -325,17 +331,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
         s64 prev;
         long rc;

-        __asm__ __volatile__ (
-                "0: lr.d %[p], %[c]\n"
-                " bgtz %[p], 1f\n"
-                " addi %[rc], %[p], -1\n"
-                " sc.d.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                :
-                : "memory");
+        _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
+
         return !(prev > 0);
 }

@@ -346,17 +343,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
         s64 prev;
         long rc;

-        __asm__ __volatile__ (
-                "0: lr.d %[p], %[c]\n"
-                " addi %[rc], %[p], -1\n"
-                " bltz %[rc], 1f\n"
-                " sc.d.rl %[rc], %[rc], %[c]\n"
-                " bnez %[rc], 0b\n"
-                " fence rw, rw\n"
-                "1:\n"
-                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-                :
-                : "memory");
+        _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
+
         return prev - 1;
 }

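Illustrative note, not part of the commit: because the suffix is spliced in as a string literal, the new call in arch_atomic_fetch_add_unless expands, after macro substitution and string-literal concatenation, to exactly the asm block the patch removes, which is why the generated asm is unchanged. A hand-expanded sketch of _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w"):

/* Hand-expanded for illustration; this is what the compiler sees after
 * macro substitution and string-literal concatenation. */
({
        __asm__ __volatile__ (
                "0: lr.w %[p], %[c]\n"
                " beq %[p], %[u], 1f\n"
                " add %[rc], %[p], %[a]\n"
                " sc.w.rl %[rc], %[rc], %[c]\n"
                " bnez %[rc], 0b\n"
                " fence rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
});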