@@ -167,11 +167,19 @@ static inline uint32_t read_rs2(const vm_t *vm, uint32_t insn)
 
 /* virtual addressing */
 
+static void mmu_invalidate_caches(vm_t *vm)
+{
+    mmu_cache_reset_ctx(&vm->mmu_cache_fetch_ctx);
+    mmu_cache_reset_ctx(&vm->mmu_cache_load_ctx);
+    mmu_cache_reset_ctx(&vm->mmu_cache_store_ctx);
+}
+
 /* Pre-verify the root page table to minimize page table access during
  * translation time.
  */
 static void mmu_set(vm_t *vm, uint32_t satp)
 {
+    mmu_invalidate_caches(vm);
     if (satp >> 31) {
         uint32_t *page_table = vm->mem_page_table(vm, satp & MASK(22));
         if (!page_table)
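Neither struct _mmu_cache_ctx nor the mmu_cache_* helpers appear in this excerpt; they are presumably introduced elsewhere in the patch. As a rough sketch only, a single-entry cache per access type would be enough to support the calls above (field names and layout here are assumptions, not taken from the commit):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical single-entry translation cache, one instance per access type
 * (fetch/load/store); field names are illustrative. */
struct _mmu_cache_ctx {
    uint32_t vaddr_page; /* virtual address with the page offset cleared */
    uint32_t paddr_page; /* cached physical page base (ppn << RV_PAGE_SHIFT) */
    bool valid;
};

static inline void mmu_cache_reset_ctx(struct _mmu_cache_ctx *cctx)
{
    /* Dropping the entry is enough; the next translation repopulates it. */
    cctx->valid = false;
}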
@@ -228,16 +236,25 @@ static bool mmu_lookup(const vm_t *vm,
     return true;
 }
 
-static void mmu_translate(vm_t *vm,
-                          uint32_t *addr,
-                          const uint32_t access_bits,
-                          const uint32_t set_bits,
-                          const bool skip_privilege_test,
-                          const uint8_t fault,
-                          const uint8_t pfault)
+static inline void mmu_translate(vm_t *vm,
+                                 struct _mmu_cache_ctx *cctx,
+                                 uint32_t *addr,
+                                 const uint32_t access_bits,
+                                 const uint32_t set_bits,
+                                 const bool skip_privilege_test,
+                                 const uint8_t fault,
+                                 const uint8_t pfault)
 {
+    const uint32_t high_part = *addr & ~MASK(RV_PAGE_SHIFT);
+    uint32_t caddr = mmu_cache_lookup(cctx, high_part);
+
     /* NOTE: save virtual address, for physical accesses, to set exception. */
     vm->exc_val = *addr;
+
+    if (caddr) {
+        *addr = caddr | (*addr & MASK(RV_PAGE_SHIFT));
+        return;
+    }
     if (!vm->page_table)
         return;
 
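On entry, the rewritten mmu_translate() now masks off the page offset (high_part = *addr & ~MASK(RV_PAGE_SHIFT), i.e. the virtual page base, assuming MASK(n) expands to the usual (1 << n) - 1 and RV_PAGE_SHIFT is 12 for 4 KiB pages) and probes the per-access-type cache; only on a miss does it fall through to the page-table walk. Continuing the illustrative single-entry sketch above, lookup and insert consistent with that fast path could look like this:

/* A hit returns the cached physical page base, a miss returns 0, matching the
 * `if (caddr)` test above; under this sketch physical page 0 is never cached. */
static inline uint32_t mmu_cache_lookup(const struct _mmu_cache_ctx *cctx,
                                        uint32_t vaddr_page)
{
    return (cctx->valid && cctx->vaddr_page == vaddr_page) ? cctx->paddr_page
                                                           : 0;
}

static inline void mmu_cache_insert(struct _mmu_cache_ctx *cctx,
                                    uint32_t vaddr_page,
                                    uint32_t paddr_page)
{
    cctx->vaddr_page = vaddr_page;
    cctx->paddr_page = paddr_page;
    cctx->valid = true;
}

On a hit, the cached base is OR'd with the original page offset, *addr = caddr | (*addr & MASK(RV_PAGE_SHIFT)), which mirrors what the slow path later computes from ppn << RV_PAGE_SHIFT before inserting the entry.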
@@ -265,16 +282,18 @@ static void mmu_translate(vm_t *vm,
     *pte_ref = new_pte;
 
     *addr = ((*addr) & MASK(RV_PAGE_SHIFT)) | (ppn << RV_PAGE_SHIFT);
+    mmu_cache_insert(cctx, high_part, ppn << RV_PAGE_SHIFT);
 }
 
 static void mmu_fence(vm_t *vm UNUSED, uint32_t insn UNUSED)
 {
-    /* no-op for now */
+    mmu_invalidate_caches(vm);
 }
 
 static void mmu_fetch(vm_t *vm, uint32_t addr, uint32_t *value)
 {
-    mmu_translate(vm, &addr, (1 << 3), (1 << 6), false, RV_EXC_FETCH_FAULT,
+    mmu_translate(vm, &vm->mmu_cache_fetch_ctx,
+                  &addr, (1 << 3), (1 << 6), false, RV_EXC_FETCH_FAULT,
                   RV_EXC_FETCH_PFAULT);
     if (vm->error)
         return;
@@ -287,7 +306,8 @@ static void mmu_load(vm_t *vm,
                      uint32_t *value,
                      bool reserved)
 {
-    mmu_translate(vm, &addr, (1 << 1) | (vm->sstatus_mxr ? (1 << 3) : 0),
+    mmu_translate(vm, &vm->mmu_cache_load_ctx,
+                  &addr, (1 << 1) | (vm->sstatus_mxr ? (1 << 3) : 0),
                   (1 << 6), vm->sstatus_sum && vm->s_mode, RV_EXC_LOAD_FAULT,
                   RV_EXC_LOAD_PFAULT);
     if (vm->error)
@@ -306,7 +326,8 @@ static bool mmu_store(vm_t *vm,
                       uint32_t value,
                       bool cond)
 {
-    mmu_translate(vm, &addr, (1 << 2), (1 << 6) | (1 << 7),
+    mmu_translate(vm, &vm->mmu_cache_store_ctx,
+                  &addr, (1 << 2), (1 << 6) | (1 << 7),
                   vm->sstatus_sum && vm->s_mode, RV_EXC_STORE_FAULT,
                   RV_EXC_STORE_PFAULT);
     if (vm->error)
@@ -336,6 +357,8 @@ void vm_set_exception(vm_t *vm, uint32_t cause, uint32_t val)
 
 void vm_trap(vm_t *vm)
 {
+    mmu_invalidate_caches(vm);
+
     /* Fill exception fields */
     vm->scause = vm->exc_cause;
     vm->stval = vm->exc_val;
@@ -357,6 +380,8 @@ void vm_trap(vm_t *vm)
 
 static void op_sret(vm_t *vm)
 {
+    mmu_invalidate_caches(vm);
+
     /* Restore from stack */
     vm->pc = vm->sepc;
     vm->s_mode = vm->sstatus_spp;
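The remaining hunks drop all three contexts wherever a cached translation could go stale or the checks folded into it could change: satp writes (mmu_set), address-translation fences (mmu_fence), trap entry (vm_trap), and SRET (op_sret). The last two matter because the cache hit path bypasses the privilege and SUM/MXR tests, so entries created in one mode must not survive a mode switch. The diff also dereferences three per-access-type contexts on vm_t; a guess at those declarations, named after the call sites in this diff (placement in the struct is not shown here):

/* Presumed additions to vm_t elsewhere in the patch. */
struct _mmu_cache_ctx mmu_cache_fetch_ctx; /* used by mmu_fetch() */
struct _mmu_cache_ctx mmu_cache_load_ctx;  /* used by mmu_load()  */
struct _mmu_cache_ctx mmu_cache_store_ctx; /* used by mmu_store() */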