Skip to content

Commit 9b9e55e

Browse files
committed
Add new system.[ch] file
Separate system-related implementations into a new file for improved modularity. This requires changing 'dispatch_table' and 'TRAP_HANDLER_IMPL' to non-static, because system.c depends on them.
1 parent 172e59d commit 9b9e55e

File tree

4 files changed

+364
-19
lines changed

4 files changed

+364
-19
lines changed

Makefile

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,10 @@ CFLAGS += $(CFLAGS_NO_CET)
4545

4646
OBJS_EXT :=
4747

48+
ifeq ($(call has, SYSTEM), 1)
49+
OBJS_EXT += system.o
50+
endif
51+
4852
# Integer Multiplication and Division instructions
4953
ENABLE_EXT_M ?= 1
5054
$(call set-feature, EXT_M)

src/emulate.c

Lines changed: 6 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,10 @@
1414
#include <emscripten.h>
1515
#endif
1616

17+
#if RV32_HAS(SYSTEM)
18+
#include "system.h"
19+
#endif /* RV32_HAS(SYSTEM) */
20+
1721
#if RV32_HAS(EXT_F)
1822
#include <math.h>
1923
#include "softfloat.h"
@@ -71,19 +75,6 @@ static void rv_trap_default_handler(riscv_t *rv)
7175
rv->PC = rv->csr_mepc; /* mret */
7276
}
7377

74-
/*
75-
* Trap might occurs during block emulation. For instance, page fault.
76-
* In order to handle trap, we have to escape from block and execute
77-
* registered trap handler. This trap_handler function helps to execute
78-
* the registered trap handler, PC by PC. Once the trap is handled,
79-
* resume the previous execution flow where cause the trap.
80-
*
81-
* Now, rv32emu supports misaligned access and page fault handling.
82-
*/
83-
#if RV32_HAS(SYSTEM)
84-
static void trap_handler(riscv_t *rv);
85-
#endif
86-
8778
/* When a trap occurs in M-mode/S-mode, m/stval is either initialized to zero or
8879
* populated with exception-specific details to assist software in managing
8980
* the trap. Otherwise, the implementation never modifies m/stval, although
@@ -104,7 +95,7 @@ static void trap_handler(riscv_t *rv);
10495
* identifier called tval, as both are handled by TRAP_HANDLER_IMPL.
10596
*/
10697
#define TRAP_HANDLER_IMPL(type, code) \
107-
static void rv_trap_##type(riscv_t *rv, uint32_t tval) \
98+
void rv_trap_##type(riscv_t *rv, uint32_t tval) \
10899
{ \
109100
/* m/stvec (Machine/Supervisor Trap-Vector Base Address Register) \
110101
* m/stvec[MXLEN-1:2]: vector base address \
@@ -590,7 +581,7 @@ static bool do_fuse5(riscv_t *rv,
590581
}
591582

592583
/* clang-format off */
593-
static const void *dispatch_table[] = {
584+
const void *dispatch_table[] = {
594585
/* RV32 instructions */
595586
#define _(inst, can_branch, insn_len, translatable, reg_mask) [rv_insn_##inst] = do_##inst,
596587
RV_INSN_LIST
@@ -1122,10 +1113,6 @@ void rv_step(void *arg)
11221113
#endif
11231114
}
11241115

1125-
#if RV32_HAS(SYSTEM)
1126-
#include "system.c"
1127-
#endif /* SYSTEM */
1128-
11291116
void ebreak_handler(riscv_t *rv)
11301117
{
11311118
assert(rv);

src/system.c

Lines changed: 307 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,307 @@
1+
/*
2+
* rv32emu is freely redistributable under the MIT License. See the file
3+
* "LICENSE" for information on usage and redistribution of this file.
4+
*/
5+
6+
#include <assert.h>
7+
8+
#include "mpool.h"
9+
#include "system.h"
10+
11+
/* FIXME:
12+
* reuse RV_TRAP_LIST with X macro
13+
* RV_TRAP_LIST should be in some header file first
14+
*/
15+
#define TRAP_HANDLER_DECL(type) \
16+
extern void rv_trap_##type(riscv_t *rv, uint32_t tval)
17+
18+
TRAP_HANDLER_DECL(pagefault_insn);
19+
TRAP_HANDLER_DECL(pagefault_load);
20+
TRAP_HANDLER_DECL(pagefault_store);
21+
22+
extern void *dispatch_table[];
23+
24+
/*
 * A trap may occur during block emulation — for instance, a page fault.
 * To handle it we must escape from the translated block and execute the
 * registered trap handler instruction by instruction. Once the trap is
 * handled (the sret implementation clears rv->is_trapped), execution
 * resumes at the flow that caused the trap.
 */
void trap_handler(riscv_t *rv)
{
    /* scratch IR, decoded and dispatched one instruction at a time */
    rv_insn_t *ir = mpool_alloc(rv->block_ir_mp);
    assert(ir);

    /* rv->is_trapped is set to false by the sret implementation */
    while (rv->is_trapped && !rv_has_halted(rv)) {
        uint32_t insn = rv->io.mem_ifetch(rv, rv->PC);
        assert(insn);

        rv_decode(ir, insn);
        ir->impl = dispatch_table[ir->opcode];
        rv->compressed = is_compressed(insn);
        ir->impl(rv, ir, rv->csr_cycle, rv->PC);
    }
}
40+
41+
/* Check whether a physical page number falls inside the emulated memory. */
static bool ppn_is_valid(riscv_t *rv, uint32_t ppn)
{
    vm_attr_t *attr = PRIV(rv);
    /* number of physical pages backed by the configured memory size */
    const uint32_t page_count = attr->mem_size / RV_PG_SIZE;
    if (ppn >= page_count)
        return false;
    return true;
}
47+
48+
/* Map a physical page number to a host pointer to that page.
 * Yields NULL when the PPN is outside the emulated memory range.
 * NOTE: relies on 'rv' and 'attr' (vm_attr_t *) being in scope at the
 * expansion site.
 */
#define PAGE_TABLE(ppn)                                                  \
    ppn_is_valid(rv, ppn)                                                \
        ? (uint32_t *) (attr->mem->mem_base + (ppn << (RV_PG_SHIFT)))    \
        : NULL
52+
53+
/* Walk through the page tables and get the corresponding PTE of a virtual
 * address, if one exists
 * @rv: RISC-V emulator
 * @addr: virtual address
 * @level: out-parameter; the level at which the PTE is located
 * @return: NULL if the PTE is not found or a fault occurs, else the
 *          corresponding PTE
 */
static uint32_t *mmu_walk(riscv_t *rv, const uint32_t addr, uint32_t *level)
{
    vm_attr_t *attr = PRIV(rv);
    /* satp[21:0] holds the root page table's PPN */
    uint32_t ppn = rv->csr_satp & MASK(22);

    /* root page table */
    uint32_t *page_table = PAGE_TABLE(ppn);
    if (!page_table)
        return NULL;

    /* two-level walk: i == 1 uses VPN[1], i == 0 uses VPN[0] */
    for (int i = 1; i >= 0; i--) {
        *level = 2 - i;
        uint32_t vpn =
            (addr >> RV_PG_SHIFT >> (i * (RV_PG_SHIFT - 2))) & MASK(10);
        uint32_t *pte = page_table + vpn;

        /* low four PTE bits: X, W, R, V — they select the entry kind */
        uint8_t XWRV_bit = (*pte & MASK(4));
        switch (XWRV_bit) {
        case NEXT_PG_TBL: /* next level of the page table */
            ppn = (*pte >> (RV_PG_SHIFT - 2));
            page_table = PAGE_TABLE(ppn);
            if (!page_table)
                return NULL;
            break;
        case RO_PAGE:
        case RW_PAGE:
        case EO_PAGE:
        case RX_PAGE:
        case RWX_PAGE:
            ppn = (*pte >> (RV_PG_SHIFT - 2));
            /* a first-level leaf whose low PPN bits are nonzero is a
             * misaligned superpage */
            if (*level == 1 &&
                unlikely(ppn & MASK(10))) /* misaligned superpage */
                return NULL;
            return pte; /* leaf PTE */
        case RESRV_PAGE1:
        case RESRV_PAGE2:
        default:
            return NULL;
        }
    }

    return NULL;
}
103+
104+
/* Verify the PTE and generate the corresponding fault if needed
 * @op: the operation (ifetch / read / write)
 * @rv: RISC-V emulator
 * @pte: the PTE to be verified (NULL when the page-table walk failed)
 * @addr: the corresponding virtual address, reported as tval in the fault
 * @return: false if any fault was generated because the access permission
 *          was violated, else true
 */
/* FIXME: handle access fault, addr out of range check */
#define MMU_FAULT_CHECK(op, rv, pte, addr, access_bits) \
    mmu_##op##_fault_check(rv, pte, addr, access_bits)
#define MMU_FAULT_CHECK_IMPL(op, pgfault)                                    \
    static bool mmu_##op##_fault_check(riscv_t *rv, uint32_t *pte,           \
                                       uint32_t addr, uint32_t access_bits)  \
    {                                                                        \
        /* invalid entry (V bit clear) */                                    \
        if (pte && (!(*pte & PTE_V))) {                                      \
            rv->is_trapped = true;                                           \
            rv_trap_##pgfault(rv, addr);                                     \
            return false;                                                    \
        }                                                                    \
        /* missing PTE, or the required permission bit is absent */          \
        if (!(pte && (*pte & access_bits))) {                                \
            rv->is_trapped = true;                                           \
            rv_trap_##pgfault(rv, addr);                                     \
            return false;                                                    \
        }                                                                    \
        /*                                                                   \
         * (1) When MXR=0, only loads from pages marked readable (R=1) will \
         * succeed.                                                          \
         *                                                                   \
         * (2) When MXR=1, loads from pages marked either readable or       \
         * executable (R=1 or X=1) will succeed.                             \
         */                                                                  \
        if (pte && ((!(SSTATUS_MXR & rv->csr_sstatus) && !(*pte & PTE_R) &&  \
                     (access_bits == PTE_R)) ||                              \
                    ((SSTATUS_MXR & rv->csr_sstatus) &&                      \
                     !((*pte & PTE_R) | (*pte & PTE_X)) &&                   \
                     (access_bits == PTE_R)))) {                             \
            rv->is_trapped = true;                                           \
            rv_trap_##pgfault(rv, addr);                                     \
            return false;                                                    \
        }                                                                    \
        /*                                                                   \
         * When SUM=0, S-mode memory accesses to pages that are accessible  \
         * by U-mode will fault.                                             \
         */                                                                  \
        if (pte && rv->priv_mode == RV_PRIV_S_MODE &&                        \
            !(SSTATUS_SUM & rv->csr_sstatus) && (*pte & PTE_U)) {            \
            rv->is_trapped = true;                                           \
            rv_trap_##pgfault(rv, addr);                                     \
            return false;                                                    \
        }                                                                    \
        /* PTE not found, map it in handler */                               \
        /* NOTE(review): the second check above already returns when pte is \
         * NULL, so this branch looks unreachable — confirm before relying  \
         * on it. */                                                         \
        if (!pte) {                                                          \
            rv->is_trapped = true;                                           \
            rv_trap_##pgfault(rv, addr);                                     \
            return false;                                                    \
        }                                                                    \
        /* valid PTE */                                                      \
        return true;                                                         \
    }

/* permission checkers for the three access kinds, each raising its own
 * page-fault trap */
MMU_FAULT_CHECK_IMPL(ifetch, pagefault_insn)
MMU_FAULT_CHECK_IMPL(read, pagefault_load)
MMU_FAULT_CHECK_IMPL(write, pagefault_store)
168+
169+
/* Declare and compute the physical page number (already shifted into its
 * address position) and the in-page offset of the current translation.
 * NOTE: this macro declares 'ppn' and 'offset' in the caller's scope and
 * captures 'pte', 'level' and 'addr' by name. A level-1 leaf (superpage)
 * keeps RV_PG_SHIFT + 10 offset bits; otherwise RV_PG_SHIFT bits.
 */
#define get_ppn_and_offset(ppn, offset)                       \
    uint32_t ppn;                                             \
    uint32_t offset;                                          \
    do {                                                      \
        ppn = *pte >> (RV_PG_SHIFT - 2) << RV_PG_SHIFT;       \
        offset = level == 1 ? addr & MASK((RV_PG_SHIFT + 10)) \
                            : addr & MASK(RV_PG_SHIFT);       \
    } while (0)
177+
178+
/* Fetch an instruction word, translating the address when paging is on.
 * @rv: RISC-V emulator
 * @addr: address of the instruction (virtual when satp != 0)
 *
 * On a permission/translation fault the page-fault trap is raised and the
 * walk is retried before the physical access is performed.
 * NOTE(review): if the retried mmu_walk() still returns NULL,
 * get_ppn_and_offset() dereferences a NULL 'pte' — confirm the fault path
 * guarantees a usable mapping here.
 */
uint32_t mmu_ifetch(riscv_t *rv, const uint32_t addr)
{
    /* satp == 0: bare addressing, no translation */
    if (!rv->csr_satp)
        return memory_ifetch(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(ifetch, rv, pte, addr, PTE_X);
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    /* macro declares 'ppn' and 'offset' from pte/level/addr */
    get_ppn_and_offset(ppn, offset);
    return memory_ifetch(ppn | offset);
}
193+
194+
/* Load a 32-bit word, translating the address when paging is enabled. */
uint32_t mmu_read_w(riscv_t *rv, const uint32_t addr)
{
    /* bare mode: a satp of zero disables translation entirely */
    if (rv->csr_satp == 0)
        return memory_read_w(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool permitted = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
    if (unlikely(!permitted)) {
        /* the fault has been raised; walk again for the handler's mapping */
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    return memory_read_w(ppn | offset);
}
209+
210+
/* Load a 16-bit halfword, translating the address when paging is enabled. */
uint16_t mmu_read_s(riscv_t *rv, const uint32_t addr)
{
    /* bare mode: a satp of zero disables translation entirely */
    if (rv->csr_satp == 0)
        return memory_read_s(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool permitted = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
    if (unlikely(!permitted)) {
        /* the fault has been raised; walk again for the handler's mapping */
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    return memory_read_s(ppn | offset);
}
225+
226+
/* Load a single byte, translating the address when paging is enabled. */
uint8_t mmu_read_b(riscv_t *rv, const uint32_t addr)
{
    /* bare mode: a satp of zero disables translation entirely */
    if (rv->csr_satp == 0)
        return memory_read_b(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool permitted = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
    if (unlikely(!permitted)) {
        /* the fault has been raised; walk again for the handler's mapping */
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    return memory_read_b(ppn | offset);
}
241+
242+
/* Store a 32-bit word, translating the address when paging is enabled. */
void mmu_write_w(riscv_t *rv, const uint32_t addr, const uint32_t val)
{
    /* bare mode: a satp of zero disables translation entirely */
    if (rv->csr_satp == 0) {
        memory_write_w(addr, (uint8_t *) &val);
        return;
    }

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool permitted = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
    if (unlikely(!permitted)) {
        /* the fault has been raised; walk again for the handler's mapping */
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    memory_write_w(ppn | offset, (uint8_t *) &val);
}
257+
258+
/* Store a 16-bit halfword, translating the address when paging is enabled. */
void mmu_write_s(riscv_t *rv, const uint32_t addr, const uint16_t val)
{
    /* bare mode: a satp of zero disables translation entirely */
    if (rv->csr_satp == 0) {
        memory_write_s(addr, (uint8_t *) &val);
        return;
    }

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool permitted = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
    if (unlikely(!permitted)) {
        /* the fault has been raised; walk again for the handler's mapping */
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    memory_write_s(ppn | offset, (uint8_t *) &val);
}
273+
274+
/* Store a single byte, translating the address when paging is enabled. */
void mmu_write_b(riscv_t *rv, const uint32_t addr, const uint8_t val)
{
    /* bare mode: a satp of zero disables translation entirely */
    if (rv->csr_satp == 0) {
        memory_write_b(addr, (uint8_t *) &val);
        return;
    }

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool permitted = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
    if (unlikely(!permitted)) {
        /* the fault has been raised; walk again for the handler's mapping */
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    memory_write_b(ppn | offset, (uint8_t *) &val);
}
289+
290+
/* I/O handler table routing all memory accesses through the MMU wrappers
 * above. NOTE(review): presumably installed in place of the direct-memory
 * riscv_io_t when satp-based translation is enabled — confirm at the
 * registration site.
 */
riscv_io_t mmu_io = {
    /* memory read interface */
    .mem_ifetch = mmu_ifetch,
    .mem_read_w = mmu_read_w,
    .mem_read_s = mmu_read_s,
    .mem_read_b = mmu_read_b,

    /* memory write interface */
    .mem_write_w = mmu_write_w,
    .mem_write_s = mmu_write_s,
    .mem_write_b = mmu_write_b,

    /* system services or essential routines (defined outside this file) */
    .on_ecall = ecall_handler,
    .on_ebreak = ebreak_handler,
    .on_memcpy = memcpy_handler,
    .on_memset = memset_handler,
};

0 commit comments

Comments
 (0)