@@ -24,6 +24,7 @@ extern struct target_ops gdbstub_ops;
2424#endif
2525
2626#include "decode.h"
27+ #include "io.h"
2728#include "mpool.h"
2829#include "riscv.h"
2930#include "riscv_private.h"
@@ -51,7 +52,10 @@ extern struct target_ops gdbstub_ops;
5152 _(breakpoint, 3) /* Breakpoint */ \
5253 _ (load_misaligned , 4 ) /* Load address misaligned */ \
5354 _ (store_misaligned , 6 ) /* Store/AMO address misaligned */ \
54- _ (ecall_M , 11 ) /* Environment call from M-mode */
55+ _ (ecall_M , 11 ) /* Environment call from M-mode */ \
56+ _ (insn_pgfault , 12 ) /* Instruction page fault */ \
57+ _ (load_pgfault , 13 ) /* Load page fault */ \
58+ _ (store_pgfault , 15 ) /* Store page fault */
5559/* clang-format on */
5660
5761enum {
@@ -196,6 +200,8 @@ static uint32_t *csr_get_ptr(riscv_t *rv, uint32_t csr)
196200 case CSR_FCSR :
197201 return (uint32_t * ) (& rv -> csr_fcsr );
198202#endif
203+ case CSR_SATP :
204+ return (uint32_t * ) (& rv -> csr_satp );
199205 default :
200206 return NULL ;
201207 }
@@ -220,7 +226,16 @@ static uint32_t csr_csrrw(riscv_t *rv, uint32_t csr, uint32_t val)
220226 out &= FFLAG_MASK ;
221227#endif
222228
223- * c = val ;
229+ if (c == & rv -> csr_satp ) {
230+ const uint8_t mode_sv32 = val >> 31 ;
231+ if (mode_sv32 )
232+ * c = val & MASK (22 ); /* store ppn */
233+ else /* bare mode */
234+ * c = 0 ; /* virtual mem addr maps to same physical mem addr directly
235+ */
236+ } else {
237+ * c = val ;
238+ }
224239
225240 return out ;
226241}
@@ -456,7 +471,7 @@ static bool do_fuse3(riscv_t *rv, rv_insn_t *ir, uint64_t cycle, uint32_t PC)
456471 for (int i = 0 ; i < ir -> imm2 ; i ++ ) {
457472 uint32_t addr = rv -> X [fuse [i ].rs1 ] + fuse [i ].imm ;
458473 RV_EXC_MISALIGN_HANDLER (3 , store , false, 1 );
459- rv -> io .mem_write_w (addr , rv -> X [fuse [i ].rs2 ]);
474+ rv -> io .mem_write_w (rv , addr , rv -> X [fuse [i ].rs2 ]);
460475 }
461476 PC += ir -> imm2 * 4 ;
462477 if (unlikely (RVOP_NO_NEXT (ir ))) {
@@ -480,7 +495,7 @@ static bool do_fuse4(riscv_t *rv, rv_insn_t *ir, uint64_t cycle, uint32_t PC)
480495 for (int i = 0 ; i < ir -> imm2 ; i ++ ) {
481496 uint32_t addr = rv -> X [fuse [i ].rs1 ] + fuse [i ].imm ;
482497 RV_EXC_MISALIGN_HANDLER (3 , load , false, 1 );
483- rv -> X [fuse [i ].rd ] = rv -> io .mem_read_w (addr );
498+ rv -> X [fuse [i ].rd ] = rv -> io .mem_read_w (rv , addr );
484499 }
485500 PC += ir -> imm2 * 4 ;
486501 if (unlikely (RVOP_NO_NEXT (ir ))) {
@@ -604,16 +619,18 @@ static void block_translate(riscv_t *rv, block_t *block)
604619 block -> pc_start = block -> pc_end = rv -> PC ;
605620
606621 rv_insn_t * prev_ir = NULL ;
607- rv_insn_t * ir = mpool_calloc (rv -> block_ir_mp );
622+ rv_insn_t * ir = mpool_alloc (rv -> block_ir_mp );
608623 block -> ir_head = ir ;
609624
610625 /* translate the basic block */
611626 while (true) {
627+ memset (ir , 0 , sizeof (rv_insn_t ));
628+
612629 if (prev_ir )
613630 prev_ir -> next = ir ;
614631
615632 /* fetch the next instruction */
616- const uint32_t insn = rv -> io .mem_ifetch (block -> pc_end );
633+ const uint32_t insn = rv -> io .mem_ifetch (rv , block -> pc_end );
617634
618635 /* decode the instruction */
619636 if (!rv_decode (ir , insn )) {
@@ -644,7 +661,7 @@ static void block_translate(riscv_t *rv, block_t *block)
644661 break ;
645662 }
646663
647- ir = mpool_calloc (rv -> block_ir_mp );
664+ ir = mpool_alloc (rv -> block_ir_mp );
648665 }
649666
650667 assert (prev_ir );
@@ -691,7 +708,7 @@ static bool detect_memset(riscv_t *rv, size_t type)
691708
692709 uint32_t tmp_pc = rv -> PC ;
693710 for (uint32_t i = 0 ; i < memset_len ; i ++ ) {
694- const uint32_t insn = rv -> io .mem_ifetch (tmp_pc );
711+ const uint32_t insn = rv -> io .mem_ifetch (rv , tmp_pc );
695712 if (unlikely (insn != memset_insn [i ]))
696713 return false;
697714 tmp_pc += 4 ;
@@ -712,7 +729,7 @@ static bool detect_memcpy(riscv_t *rv, size_t type)
712729
713730 uint32_t tmp_pc = rv -> PC ;
714731 for (uint32_t i = 0 ; i < memcpy_len ; i ++ ) {
715- const uint32_t insn = rv -> io .mem_ifetch (tmp_pc );
732+ const uint32_t insn = rv -> io .mem_ifetch (rv , tmp_pc );
716733 if (unlikely (insn != memcpy_insn [i ]))
717734 return false;
718735 tmp_pc += 4 ;
@@ -1178,6 +1195,230 @@ void rv_step(void *arg)
11781195#endif
11791196}
11801197
1198+ static bool ppn_is_valid (riscv_t * rv , uint32_t ppn )
1199+ {
1200+ vm_attr_t * attr = PRIV (rv );
1201+ const uint32_t nr_pg_max = attr -> mem_size / RV_PG_SIZE ;
1202+ return ppn < nr_pg_max ;
1203+ }
1204+
1205+ #define PAGE_TABLE (ppn ) \
1206+ ppn_is_valid(rv, ppn) ? (uint32_t *) &attr->mem[ppn << (RV_PG_SHIFT - 2)] \
1207+ : NULL
1208+
1209+ /* Walk through page tables and get the corresponding PTE by virtual address if
1210+ * exists
1211+ * @rv: RISC-V emulator
1212+ * @addr: virtual address
1213+ * @return: NULL if a not found or fault else the corresponding PTE
1214+ */
1215+ static uint32_t * mmu_walk (riscv_t * rv , const uint32_t addr )
1216+ {
1217+ vm_attr_t * attr = PRIV (rv );
1218+ uint32_t ppn = rv -> csr_satp ;
1219+ if (ppn == 0 ) /* Bare mode */
1220+ return NULL ;
1221+
1222+ /* start from root page table */
1223+ uint32_t * page_table = PAGE_TABLE (ppn );
1224+ if (!page_table )
1225+ return NULL ;
1226+
1227+ for (int level = 1 ; level >= 0 ; level -- ) {
1228+ uint32_t vpn = addr >> RV_PG_SHIFT >> (level * (RV_PG_SHIFT - 2 ));
1229+ uint32_t * pte = page_table + vpn ;
1230+
1231+ /* PTE XWRV bit in order */
1232+ uint8_t XWRV_bit = (* pte & MASK (4 ));
1233+ switch (XWRV_bit ) {
1234+ case 0b0001 : /* next level of the page table */
1235+ page_table = PAGE_TABLE (ppn );
1236+ if (!page_table )
1237+ return NULL ;
1238+ break ;
1239+ case 0b0011 :
1240+ case 0b0111 :
1241+ case 0b1001 :
1242+ case 0b1011 :
1243+ case 0b1111 :
1244+ ppn = (* pte >> (RV_PG_SHIFT - 2 ));
1245+ if (unlikely (ppn ) & MASK (10 )) /* misaligned superpage */
1246+ return NULL ;
1247+ return pte ; /* leaf PTE */
1248+ case 0b0101 :
1249+ case 0b1101 :
1250+ return NULL ;
1251+ }
1252+ }
1253+
1254+ return NULL ;
1255+ }
1256+
/* Verify the PTE and generate corresponding faults if needed
 * @op: the operation (ifetch/read/write), used to derive the checker name
 * @rv: RISC-V emulator
 * @pte: PTE to be verified (may be NULL when the walk found no mapping)
 * @addr: the corresponding virtual address to cause fault
 * @return: false if a corresponding fault is generated else true
 */
/* FIXME: handle access fault */
#define MMU_FAULT_CHECK(op, rv, pte, addr, access_bits) \
    mmu_##op##_fault_check(rv, pte, addr, access_bits)
#define MMU_FAULT_CHECK_IMPL(op, pgfault)                                     \
    static bool mmu_##op##_fault_check(riscv_t *rv, uint32_t *pte,            \
                                       uint32_t addr, uint32_t access_bits)   \
    {                                                                         \
        /* translation active but no PTE found: page is not mapped */         \
        if (!pte && rv->csr_satp) { /* not found */                           \
            rv_except_##pgfault(rv, addr);                                    \
            return false;                                                     \
        } else if (pte &&                                                     \
                   (!(*pte & PTE_V) || (!(*pte & PTE_R) && (*pte & PTE_W)))) {\
            /* invalid PTE, or the reserved W-without-R encoding */           \
            rv_except_##pgfault(rv, addr);                                    \
            return false;                                                     \
        } else if (pte && (!(*pte & PTE_X) && (access_bits & PTE_X))) {       \
            /* execute requested on a page without the X permission */        \
            rv_except_##pgfault(rv, addr);                                    \
            return false;                                                     \
        } else if (pte && (!(!(MSTATUS_MXR & rv->csr_mstatus) &&              \
                             !(*pte & PTE_R) && (access_bits & PTE_R)) &&     \
                           !((MSTATUS_MXR & rv->csr_mstatus) &&               \
                             !((*pte & PTE_R) | (*pte & PTE_X)) &&            \
                             (access_bits & PTE_R)))) {                       \
            /* read-permission check honoring mstatus.MXR (make executable    \
             * pages readable). NOTE(review): the outer negations appear to   \
             * fire on the PERMITTED case rather than the fault case --       \
             * confirm against the privileged spec / upstream history.  */    \
            rv_except_##pgfault(rv, addr);                                    \
            return false;                                                     \
        } else if (pte && ((MSTATUS_MPRV & rv->csr_mstatus) &&                \
                           !(MSTATUS_MPPH &                                   \
                             rv->csr_mstatus) && /* MPP=01 means S-mode */    \
                           (MSTATUS_MPPL & rv->csr_mstatus))) {               \
            /* effective privilege is S (MPRV=1, MPP=01): U-mode pages are    \
             * only accessible when mstatus.SUM is set */                     \
            if (!(MSTATUS_SUM & rv->csr_mstatus) && (*pte & PTE_U)) {         \
                rv_except_##pgfault(rv, addr);                                \
                return false;                                                 \
            }                                                                 \
        }                                                                     \
        return true;                                                          \
    }

/* Instantiate one checker per access kind, each raising its own fault */
MMU_FAULT_CHECK_IMPL(ifetch, insn_pgfault)
MMU_FAULT_CHECK_IMPL(read, load_pgfault)
MMU_FAULT_CHECK_IMPL(write, store_pgfault)
1303+
/* Split a leaf PTE / virtual address pair into the physical page base
 * address and the in-page offset.
 * NOTE: relies on 'pte' and 'addr' being in scope at the expansion site.
 * Fix: the previous `*pte << RV_PG_SHIFT` shifted the whole PTE (flag bits
 * included) instead of extracting the PPN field held in PTE bits 31:10.
 */
#define get_ppn_and_offset(ppn, offset)                          \
    do {                                                         \
        ppn = ((*pte) >> (RV_PG_SHIFT - 2)) << RV_PG_SHIFT;      \
        offset = addr & MASK(RV_PG_SHIFT);                       \
    } while (0)
1309+
1310+ uint32_t mmu_ifetch (riscv_t * rv , const uint32_t addr )
1311+ {
1312+ uint32_t * pte = mmu_walk (rv , addr );
1313+ bool ok = MMU_FAULT_CHECK (ifetch , rv , pte , addr , PTE_X );
1314+ if (unlikely (!ok ))
1315+ return 0 ;
1316+
1317+ if (rv -> csr_satp ) {
1318+ uint32_t ppn ;
1319+ uint32_t offset ;
1320+ get_ppn_and_offset (ppn , offset );
1321+ return memory_ifetch (ppn | offset );
1322+ }
1323+ return memory_ifetch (addr );
1324+ }
1325+
1326+ uint32_t mmu_read_w (riscv_t * rv , const uint32_t addr )
1327+ {
1328+ uint32_t * pte = mmu_walk (rv , addr );
1329+ bool ok = MMU_FAULT_CHECK (read , rv , pte , addr , PTE_R );
1330+ if (unlikely (!ok ))
1331+ return 0 ;
1332+
1333+ if (rv -> csr_satp ) {
1334+ uint32_t ppn ;
1335+ uint32_t offset ;
1336+ get_ppn_and_offset (ppn , offset );
1337+ return memory_read_w (ppn | offset );
1338+ }
1339+ return memory_read_w (addr );
1340+ }
1341+
1342+ uint16_t mmu_read_s (riscv_t * rv , const uint32_t addr )
1343+ {
1344+ uint32_t * pte = mmu_walk (rv , addr );
1345+ bool ok = MMU_FAULT_CHECK (read , rv , pte , addr , PTE_R );
1346+ if (unlikely (!ok ))
1347+ return 0 ;
1348+
1349+ if (rv -> csr_satp ) {
1350+ uint32_t ppn ;
1351+ uint32_t offset ;
1352+ get_ppn_and_offset (ppn , offset );
1353+ return memory_read_s (ppn | offset );
1354+ }
1355+ return memory_read_s (addr );
1356+ }
1357+
1358+ uint8_t mmu_read_b (riscv_t * rv , const uint32_t addr )
1359+ {
1360+ uint32_t * pte = mmu_walk (rv , addr );
1361+ bool ok = MMU_FAULT_CHECK (read , rv , pte , addr , PTE_R );
1362+ if (unlikely (!ok ))
1363+ return 0 ;
1364+
1365+ if (rv -> csr_satp ) {
1366+ uint32_t ppn ;
1367+ uint32_t offset ;
1368+ get_ppn_and_offset (ppn , offset );
1369+ return memory_read_b (ppn | offset );
1370+ }
1371+ return memory_read_b (addr );
1372+ }
1373+
1374+ void mmu_write_w (riscv_t * rv , const uint32_t addr , const uint32_t val )
1375+ {
1376+ uint32_t * pte = mmu_walk (rv , addr );
1377+ bool ok = MMU_FAULT_CHECK (write , rv , pte , addr , PTE_W );
1378+ if (unlikely (!ok ))
1379+ return ;
1380+
1381+ if (rv -> csr_satp ) {
1382+ uint32_t ppn ;
1383+ uint32_t offset ;
1384+ get_ppn_and_offset (ppn , offset );
1385+ return memory_write_w (ppn | offset , (uint8_t * ) & val );
1386+ }
1387+ return memory_write_w (addr , (uint8_t * ) & val );
1388+ }
1389+
1390+ void mmu_write_s (riscv_t * rv , const uint32_t addr , const uint16_t val )
1391+ {
1392+ uint32_t * pte = mmu_walk (rv , addr );
1393+ bool ok = MMU_FAULT_CHECK (write , rv , pte , addr , PTE_W );
1394+ if (unlikely (!ok ))
1395+ return ;
1396+
1397+ if (rv -> csr_satp ) {
1398+ uint32_t ppn ;
1399+ uint32_t offset ;
1400+ get_ppn_and_offset (ppn , offset );
1401+ return memory_write_s (ppn | offset , (uint8_t * ) & val );
1402+ }
1403+ return memory_write_s (addr , (uint8_t * ) & val );
1404+ }
1405+
1406+ void mmu_write_b (riscv_t * rv , const uint32_t addr , const uint8_t val )
1407+ {
1408+ uint32_t * pte = mmu_walk (rv , addr );
1409+ bool ok = MMU_FAULT_CHECK (write , rv , pte , addr , PTE_W );
1410+ if (unlikely (!ok ))
1411+ return ;
1412+
1413+ if (rv -> csr_satp ) {
1414+ uint32_t ppn ;
1415+ uint32_t offset ;
1416+ get_ppn_and_offset (ppn , offset );
1417+ return memory_write_b (ppn | offset , (uint8_t * ) & val );
1418+ }
1419+ return memory_write_b (addr , (uint8_t * ) & val );
1420+ }
1421+
11811422void ebreak_handler (riscv_t * rv )
11821423{
11831424 assert (rv );
@@ -1225,3 +1466,22 @@ void dump_registers(riscv_t *rv, char *out_file_path)
12251466 if (out_file_path [0 ] != '-' )
12261467 fclose (f );
12271468}
1469+
/* I/O handler table installed when MMU-based (satp/Sv32) address translation
 * is in use: every memory access is routed through the mmu_* wrappers, which
 * translate virtual addresses before touching the backing memory. System
 * services reuse the same handlers as the direct-access table. */
riscv_io_t mmu_io = {
    /* memory read interface */
    .mem_ifetch = mmu_ifetch,
    .mem_read_w = mmu_read_w,
    .mem_read_s = mmu_read_s,
    .mem_read_b = mmu_read_b,

    /* memory write interface */
    .mem_write_w = mmu_write_w,
    .mem_write_s = mmu_write_s,
    .mem_write_b = mmu_write_b,

    /* system services or essential routines */
    .on_ecall = ecall_handler,
    .on_ebreak = ebreak_handler,
    .on_memcpy = memcpy_handler,
    .on_memset = memset_handler,
};
0 commit comments