Skip to content

Commit 0d2d23b

Browse files
ancientmodernfelicitiapicostove
authored and committed
include: add common header files for riscv64
Co-authored-by: Yixue Zhao <[email protected]> Co-authored-by: stove <[email protected]> Signed-off-by: Haorong Lu <[email protected]> --- - rebased - imported a page_size() type fix (authored by Cryolitia PukNgae) Signed-off-by: PukNgae Cryolitia <[email protected]> Signed-off-by: Alexander Mikhalitsyn <[email protected]>
1 parent d8be857 commit 0d2d23b

File tree

5 files changed

+232
-0
lines changed

5 files changed

+232
-0
lines changed
Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
#ifndef __CR_ATOMIC_H__
#define __CR_ATOMIC_H__

/* Plain-int atomic counter; manipulate it only through the helpers below. */
typedef struct {
	int counter;
} atomic_t;

/* Copied from the Linux header arch/riscv/include/asm/barrier.h */

#define nop() __asm__ __volatile__("nop")

/*
 * RISC-V "fence p,s" orders the predecessor set p against the successor
 * set s; r/w are memory reads/writes, i/o are device input/output.
 * The "memory" clobber also stops the compiler from reordering around it.
 */
#define RISCV_FENCE(p, s) __asm__ __volatile__("fence " #p "," #s : : : "memory")

/* These barriers need to enforce ordering on both devices or memory. */
#define mb() RISCV_FENCE(iorw, iorw)
#define rmb() RISCV_FENCE(ir, ir)
#define wmb() RISCV_FENCE(ow, ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb() RISCV_FENCE(rw, rw)
#define __smp_rmb() RISCV_FENCE(r, r)
#define __smp_wmb() RISCV_FENCE(w, w)

/*
 * Store-release: all prior reads/writes are ordered before the store.
 * NOTE(review): WRITE_ONCE/READ_ONCE and compiletime_assert_atomic_type
 * are presumably supplied by common/compiler.h — confirm the include chain.
 */
#define __smp_store_release(p, v) \
	do { \
		compiletime_assert_atomic_type(*p); \
		RISCV_FENCE(rw, w); \
		WRITE_ONCE(*p, v); \
	} while (0)

/* Load-acquire: the load is ordered before all subsequent reads/writes. */
#define __smp_load_acquire(p) \
	({ \
		typeof(*p) ___p1 = READ_ONCE(*p); \
		compiletime_assert_atomic_type(*p); \
		RISCV_FENCE(r, rw); \
		___p1; \
	})
38+
39+
/* Copied from the Linux kernel header arch/riscv/include/asm/atomic.h */
40+
41+
static inline int atomic_read(const atomic_t *v)
42+
{
43+
return (*(volatile int *)&(v)->counter);
44+
}
45+
46+
/* Plain (non-ordered) store of @i into the counter. */
static inline void atomic_set(atomic_t *v, int i)
{
	int *dst = &v->counter;

	*dst = i;
}

#define atomic_get atomic_read
52+
53+
/*
 * Atomically add @i to v->counter and return the NEW counter value.
 * amoadd.w.aqrl performs the add with acquire+release ordering and
 * yields the OLD value in %1, hence the "+ i" on return.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int result;

	asm volatile("amoadd.w.aqrl %1, %2, %0" : "+A"(v->counter), "=r"(result) : "r"(i) : "memory");
	/* Extra full barrier to make the operation fully ordered. */
	__smp_mb();
	return result + i;
}
61+
62+
/* Atomically subtract @i from the counter and return the new value. */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int delta = -i;

	return atomic_add_return(delta, v);
}
66+
67+
/* Atomically increment; returns the value the counter had BEFORE the add. */
static inline int atomic_inc(atomic_t *v)
{
	int after = atomic_add_return(1, v);

	return after - 1;
}
71+
72+
/* Atomically add @val; returns the value the counter had BEFORE the add. */
static inline int atomic_add(int val, atomic_t *v)
{
	int after = atomic_add_return(val, v);

	return after - val;
}
76+
77+
/* Atomically decrement; returns the value the counter had BEFORE the sub. */
static inline int atomic_dec(atomic_t *v)
{
	int after = atomic_sub_return(1, v);

	return after + 1;
}
81+
82+
/* true if the result is 0, or false for all other cases. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
/* Decrement and return the NEW counter value. */
#define atomic_dec_return(v) (atomic_sub_return(1, v))

/* Increment and return the NEW counter value. */
#define atomic_inc_return(v) (atomic_add_return(1, v))
87+
88+
/*
 * Atomic compare-and-exchange: if ptr->counter == old, store new.
 * Returns the value the counter held before the operation (equal to
 * @old exactly when the exchange succeeded).
 *
 * Implemented as an LR/SC retry loop: lr.w loads and reserves the word,
 * bne bails out if it does not match @old, sc.w conditionally stores
 * @new and bnez retries from 1: if the reservation was lost. The full
 * barriers on both sides make the whole operation fully ordered.
 */
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	__smp_mb();

	asm volatile("1:\n"
		     " lr.w %1, %2\n"
		     " bne %1, %3, 2f\n"
		     " sc.w %0, %4, %2\n"
		     " bnez %0, 1b\n"
		     "2:"
		     : "=&r"(tmp), "=&r"(oldval), "+A"(ptr->counter)
		     : "r"(old), "r"(new)
		     : "memory");

	__smp_mb();
	return oldval;
}
108+
109+
#endif /* __CR_ATOMIC_H__ */
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
#ifndef __CR_ASM_BITOPS_H__
2+
#define __CR_ASM_BITOPS_H__
3+
4+
#include "common/compiler.h"
5+
#include "common/asm-generic/bitops.h"
6+
7+
#define BITS_PER_LONG 64

/*
 * Select the in-word mask and the word index for bit number @nr.
 * (Was "(1##UL)": token-pasting 1 and UL just spells 1UL obscurely,
 * so write the constant directly, as the Linux BIT_MASK does.)
 */
#define BIT_MASK(nr) ((1UL) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)

/* Build the 64-bit ("d" = doubleword) AMO mnemonic for @op. */
#define __AMO(op) "amo" #op ".d"

/*
 * Atomically apply "amo<op>.d<ord>" with operand mod(BIT_MASK(nr)) to
 * the word holding bit @nr; evaluates to the bit's previous value.
 */
#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
	({ \
		unsigned long __res, __mask; \
		__mask = BIT_MASK(nr); \
		__asm__ __volatile__(__AMO(op) #ord " %0, %2, %1" \
				     : "=r"(__res), "+A"(addr[BIT_WORD(nr)]) \
				     : "r"(mod(__mask)) \
				     : "memory"); \
		((__res & __mask) != 0); \
	})

/* Same, but the old value is discarded (written to the zero register). */
#define __op_bit_ord(op, mod, nr, addr, ord) \
	__asm__ __volatile__(__AMO(op) #ord " zero, %1, %0" \
			     : "+A"(addr[BIT_WORD(nr)]) \
			     : "r"(mod(BIT_MASK(nr))) \
			     : "memory");

/* Test-and-op variants are fully ordered (.aqrl); plain ops are relaxed. */
#define __test_and_op_bit(op, mod, nr, addr) __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr) __op_bit_ord(op, mod, nr, addr, )

/* Bitmask modifiers */
#define __NOP(x) (x)
#define __NOT(x) (~(x))
37+
38+
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation may be reordered on other architectures than x86.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	/* amoor.d.aqrl ORs the bit's mask in; nonzero result = bit was set. */
	return __test_and_op_bit(or, __NOP, nr, addr);
}
49+
50+
#endif /* __CR_ASM_BITOPS_H__ */
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
#ifndef __CR_BITSPERLONG_H__
#define __CR_BITSPERLONG_H__

/* riscv64 is an LP64 ABI: a native long is 64 bits wide. */
#define BITS_PER_LONG 64

#endif /* __CR_BITSPERLONG_H__ */
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
#ifndef __CR_LINKAGE_H__
#define __CR_LINKAGE_H__

/* Assembler-only helpers for declaring symbols in .S files. */
#ifdef __ASSEMBLY__

/* NOTE(review): ".align 4" is power-of-two on RISC-V, i.e. 16-byte
 * alignment, padded with 0x00 — confirm that is the intent. */
#define __ALIGN .align 4, 0x00
#define __ALIGN_STR ".align 4, 0x00"

/* Declare a global data symbol. */
#define GLOBAL(name) \
	.globl name; \
	name:

/* Declare an aligned, global function entry point. */
#define ENTRY(name) \
	.globl name; \
	.type name, @function; \
	__ALIGN; \
	name:

/* Close an ENTRY(): record the symbol's size for the ELF symtab. */
#define END(sym) .size sym, .- sym

#endif /* __ASSEMBLY__ */

#endif /* __CR_LINKAGE_H__ */
Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
#ifndef __CR_ASM_PAGE_H__
#define __CR_ASM_PAGE_H__

/* NOTE(review): presumably tells criu core the page size is not a
 * compile-time constant on this arch — confirm against its users. */
#define ARCH_HAS_LONG_PAGES

#ifndef CR_NOGLIBC
#include <string.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */

/* Lazily-initialized caches (0 = not probed yet); presumably defined
 * once in a common .c file — confirm where they live. */
extern unsigned __page_size;
extern unsigned __page_shift;
13+
static inline unsigned page_size(void)
14+
{
15+
if (!__page_size)
16+
__page_size = sysconf(_SC_PAGESIZE);
17+
return __page_size;
18+
}
19+
20+
static inline unsigned page_shift(void)
21+
{
22+
if (!__page_shift)
23+
__page_shift = (ffsl(page_size()) - 1);
24+
return __page_shift;
25+
}
26+
27+
/*
 * Don't add ifdefs for PAGE_SIZE: if any header defines it as a constant,
 * then we need refrain using PAGE_SIZE in criu and use page_size()
 * across sources (as it may differ on riscv64).
 */
#define PAGE_SIZE page_size()
/* PAGE_SIZE is a function call, so these are evaluated at each use. */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_SHIFT page_shift()

#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
37+
38+
#else /* CR_NOGLIBC */

/*
 * No glibc, so no sysconf(); page_size() is presumably provided by the
 * nolibc runtime — confirm the defining TU. Return type changed from
 * "unsigned long" to "unsigned" to match the glibc-branch definition
 * above (mismatched declaration/definition types are a conformance bug).
 */
extern unsigned page_size(void);
#define PAGE_SIZE page_size()

#endif /* CR_NOGLIBC */
#endif /* __CR_ASM_PAGE_H__ */

0 commit comments

Comments
 (0)