
Commit eb87e56

guoren83 authored and palmer-dabbelt committed
riscv: xchg: Prefetch the destination word for sc.w
The cost of changing a cacheline from shared to exclusive state can be
significant, especially when this is triggered by an exclusive store, since
it may result in having to retry the transaction.

This patch makes use of prefetch.w to prefetch cachelines for write prior
to lr/sc loops when using the xchg_small atomic routine.

This patch is inspired by commit 0ea366f ("arm64: atomics: prefetch the
destination word for write prior to stxr").

Signed-off-by: Guo Ren <[email protected]>
Signed-off-by: Guo Ren <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Tested-by: Andrea Parri <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexandre Ghiti <[email protected]>
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent a5f947c commit eb87e56

File tree

1 file changed: +3 -1 lines changed


arch/riscv/include/asm/cmpxchg.h

Lines changed: 3 additions & 1 deletion
@@ -13,6 +13,7 @@
 #include <asm/hwcap.h>
 #include <asm/insn-def.h>
 #include <asm/cpufeature-macros.h>
+#include <asm/processor.h>
 
 #define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append,	\
 			   swap_append, r, p, n)			\
@@ -37,14 +38,15 @@
 									\
 	__asm__ __volatile__ (						\
 	       prepend							\
+	       PREFETCHW_ASM(%5)					\
 	       "0:	lr.w %0, %2\n"					\
 	       "	and  %1, %0, %z4\n"				\
 	       "	or   %1, %1, %z3\n"				\
 	       "	sc.w" sc_sfx " %1, %1, %2\n"			\
 	       "	bnez %1, 0b\n"					\
 	       sc_append						\
 	       : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
-	       : "rJ" (__newx), "rJ" (~__mask)				\
+	       : "rJ" (__newx), "rJ" (~__mask), "rJ" (__ptr32b)	\
 	       : "memory");						\
 									\
 	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
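
For illustration, here is a minimal, self-contained sketch of the pattern the patch applies, not the kernel's actual macro: prefetch the destination cacheline for write immediately before the LR/SC retry loop so the store-conditional is less likely to lose the line and fail. The function name and operand layout below are invented for the example, and it assumes an assembler that accepts the Zicbop prefetch.w mnemonic; the kernel instead emits the same hint via PREFETCHW_ASM() from <asm/insn-def.h> (which encodes the instruction directly for older toolchains), with the new "rJ" (__ptr32b) input in the diff above supplying the %5 address operand.

	/*
	 * Sketch only: a plain 32-bit exchange loop showing where the
	 * write prefetch sits relative to the LR/SC sequence.
	 */
	static inline unsigned int xchg32_prefetched(volatile unsigned int *ptr,
						     unsigned int newval)
	{
		unsigned int old, tmp;

		__asm__ __volatile__ (
			"	prefetch.w	0(%2)\n"	/* hint: fetch the line for write    */
			"0:	lr.w		%0, (%2)\n"	/* load-reserved the current value   */
			"	sc.w		%1, %3, (%2)\n"	/* try to store newval; 0 on success */
			"	bnez		%1, 0b\n"	/* reservation lost: retry the loop  */
			: "=&r" (old), "=&r" (tmp)
			: "r" (ptr), "r" (newval)
			: "memory");

		return old;
	}

The actual macro differs in that it operates on an aligned 32-bit word containing a smaller value (hence the mask and shift), but the placement of the prefetch relative to lr.w/sc.w is the same.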
