/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#ifdef CONFIG_SMP
#include <asm-generic/atomic64.h>

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

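/*
 * Map the generic acquire/release fences onto the C-SKY barrier helpers
 * from asm/barrier.h. Going by their naming convention, bar.brarw orders
 * prior reads before later reads/writes (acquire) and bar.brwaw orders
 * prior reads/writes before later writes (release).
 */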
#define __atomic_acquire_fence()	__bar_brarw()

#define __atomic_release_fence()	__bar_brwaw()

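/*
 * Naturally aligned 32-bit loads and stores are atomic on their own, so
 * plain READ_ONCE()/WRITE_ONCE() accesses are sufficient here.
 */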
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

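/*
 * Load-exclusive/store-exclusive loop: ldex.w reserves the word, and
 * stex.w succeeds only if the reservation still holds, leaving the
 * success flag in its source register (0 on failure). "bez %0, 1b"
 * therefore retries until the read-modify-write completes atomically.
 */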
#define ATOMIC_OP(op)							\
static __always_inline							\
void arch_atomic_##op(int i, atomic_t *v)				\
{									\
	unsigned long tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%2)	\n"			\
	"	" #op "		%0, %1		\n"			\
	"	stex.w		%0, (%2)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
}

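/*
 * ##op pastes the argument into the function symbol and #op stringizes
 * it into the asm body, so each name below must also be a valid C-SKY
 * ALU mnemonic. The leading space in "( or)" is cosmetic: surrounding
 * whitespace in a macro argument is stripped before pasting.
 */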
ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP( or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

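/*
 * Fetch variants return the value the counter held before the update.
 * Only the _relaxed forms are implemented; the generic atomic layer
 * synthesizes the acquire/release/fully-ordered versions from these
 * plus the fence macros defined above.
 */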
#define ATOMIC_FETCH_OP(op)						\
static __always_inline							\
int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)		\
{									\
	register int ret, tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%3)	\n"			\
	"	mov		%1, %0		\n"			\
	"	" #op "		%0, %2		\n"			\
	"	stex.w		%0, (%3)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp), "=&r" (ret)					\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
	return ret;							\
}

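/*
 * The _return variants are derived from the fetch variants: re-apply
 * the C operator to the fetched old value to recover the new value.
 */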
#define ATOMIC_OP_RETURN(op, c_op)					\
static __always_inline							\
int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)		\
{									\
	return arch_atomic_fetch_##op##_relaxed(i, v) c_op i;		\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_FETCH_OP(op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

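/*
 * Advertise the arch-provided variants to the generic fallback layer
 * so it does not redefine them.
 */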
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN

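/*
 * The atomic API defines no *_return forms for the bitwise operations,
 * so only the fetch variants are instantiated here.
 */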
#define ATOMIC_OPS(op)							\
	ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS( or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP

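/*
 * xchg/cmpxchg are thin wrappers around the __xchg_relaxed() and
 * __cmpxchg*() helpers from asm/cmpxchg.h; the final argument is the
 * operand size in bytes.
 */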
#define ATOMIC_OP()							\
static __always_inline							\
int arch_atomic_xchg_relaxed(atomic_t *v, int n)			\
{									\
	return __xchg_relaxed(n, &(v->counter), 4);			\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg(atomic_t *v, int o, int n)			\
{									\
	return __cmpxchg(&(v->counter), o, n, 4);			\
}

#define ATOMIC_OPS()							\
	ATOMIC_OP()

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed	arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg		arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP

#else
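/* !CONFIG_SMP: fall back to the generic (UP) atomic implementation. */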
#include <asm-generic/atomic.h>
#endif

#endif /* __ASM_CSKY_ATOMIC_H */