#ifndef __CR_ATOMIC_H__
#define __CR_ATOMIC_H__

#include <stdint.h> /* uint32_t; callers may also get this transitively */

typedef uint32_t atomic_t;

/* Copied from the Linux header arch/riscv/include/asm/barrier.h */

#define nop() __asm__ __volatile__("nop")

#define RISCV_FENCE(p, s) __asm__ __volatile__("fence " #p "," #s : : : "memory")
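
/*
 * RISCV_FENCE() stringizes its predecessor/successor sets, so e.g.
 * RISCV_FENCE(rw, w) emits "fence rw,w": all earlier reads and writes
 * must complete before any later write becomes visible.
 */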

/* These barriers need to enforce ordering on both devices and memory. */
#define mb() RISCV_FENCE(iorw, iorw)
#define rmb() RISCV_FENCE(ir, ir)
#define wmb() RISCV_FENCE(ow, ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb() RISCV_FENCE(rw, rw)
#define __smp_rmb() RISCV_FENCE(r, r)
#define __smp_wmb() RISCV_FENCE(w, w)

/*
 * READ_ONCE()/WRITE_ONCE() and compiletime_assert_atomic_type() are not
 * defined in this header; they are expected to come from the project's
 * common compiler helpers, as in the Linux code this is copied from.
 */
#define __smp_store_release(p, v)                   \
	do {                                        \
		compiletime_assert_atomic_type(*p); \
		RISCV_FENCE(rw, w);                 \
		WRITE_ONCE(*p, v);                  \
	} while (0)

#define __smp_load_acquire(p)                       \
	({                                          \
		typeof(*p) ___p1 = READ_ONCE(*p);   \
		compiletime_assert_atomic_type(*p); \
		RISCV_FENCE(r, rw);                 \
		___p1;                              \
	})
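
/*
 * Illustrative pairing (not part of the original header): a producer
 * publishes data with a store-release and a consumer picks it up with a
 * load-acquire:
 *
 *	// producer:
 *	data = 42;
 *	__smp_store_release(&flag, 1);
 *
 *	// consumer:
 *	while (!__smp_load_acquire(&flag))
 *		;
 *	use(data);	// guaranteed to observe data == 42
 *
 * The rw,w fence before the store and the r,rw fence after the load are
 * what make the write to data visible before flag is observed set.
 */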

/* Copied from the Linux kernel header arch/riscv/include/asm/atomic.h */

static inline int atomic_read(const atomic_t *v)
{
	/* Volatile access so the compiler cannot cache or tear the load. */
	return (*(volatile int *)v);
}

static inline void atomic_set(atomic_t *v, int i)
{
	*v = i;
}

#define atomic_get atomic_read

static inline int atomic_add_return(int i, atomic_t *v)
{
	int result;

	/*
	 * amoadd.w atomically adds i to *v and leaves the value the memory
	 * held *before* the add in result (hence the "result + i" below);
	 * the .aqrl suffix makes the AMO fully ordered, and the trailing
	 * fence is kept as an extra belt-and-braces barrier.
	 */
	asm volatile("amoadd.w.aqrl %1, %2, %0" : "+A"(*v), "=r"(result) : "r"(i) : "memory");
	__smp_mb();
	return result + i;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

/* Unlike the *_return helpers, these return the counter's *old* value. */
static inline int atomic_inc(atomic_t *v)
{
	return atomic_add_return(1, v) - 1;
}

static inline int atomic_add(int val, atomic_t *v)
{
	return atomic_add_return(val, v) - val;
}

static inline int atomic_dec(atomic_t *v)
{
	return atomic_sub_return(1, v) + 1;
}

/* True if the result is 0, false in all other cases. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_dec_return(v) (atomic_sub_return(1, v))

#define atomic_inc_return(v) (atomic_add_return(1, v))
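
/*
 * Illustrative only (not part of the original header): the usual
 * refcounting pattern these helpers support, assuming a hypothetical
 * object with an atomic_t refcnt field:
 *
 *	atomic_inc(&obj->refcnt);		// take a reference
 *	if (atomic_dec_and_test(&obj->refcnt))	// drop a reference
 *		free_obj(obj);			// last one frees the object
 */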

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	__smp_mb();

	/*
	 * Load-reserved/store-conditional loop: re-read *ptr until either
	 * it no longer equals old, or the conditional store of new
	 * succeeds (sc.w writes 0 to tmp on success, non-zero on failure).
	 */
	asm volatile("1:\n"
		     " lr.w %1, %2\n"
		     " bne %1, %3, 2f\n"
		     " sc.w %0, %4, %2\n"
		     " bnez %0, 1b\n"
		     "2:"
		     : "=&r"(tmp), "=&r"(oldval), "+A"(*ptr)
		     : "r"(old), "r"(new)
		     : "memory");

	__smp_mb();
	/* oldval is the value observed; it equals old iff the swap happened. */
	return oldval;
}
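
/*
 * Illustrative only (not part of the original header): a typical CAS
 * retry loop built on atomic_cmpxchg(), here a hypothetical bounded
 * increment that never pushes the counter past limit:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = old < limit ? old + 1 : old;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */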

#endif /* __CR_ATOMIC_H__ */