|
8 | 8 | * 2012-01-10 bernard porting to AM1808 |
9 | 9 | * 2021-11-28 GuEe-GUI first version |
10 | 10 | * 2022-12-10 WangXiaoyao porting to MM |
| 11 | + * 2024-07-08 Shell added support for ASID |
11 | 12 | */ |
12 | 13 |
|
13 | 14 | #define DBG_TAG "hw.mmu" |
14 | | -#define DBG_LVL DBG_LOG |
| 15 | +#define DBG_LVL DBG_INFO |
15 | 16 | #include <rtdbg.h> |
16 | 17 |
|
17 | 18 | #include <rthw.h> |
@@ -138,8 +139,8 @@ static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsi |
138 | 139 | unsigned long *cur_lv_tbl = lv0_tbl; |
139 | 140 | unsigned long page; |
140 | 141 | unsigned long off; |
141 | | - intptr_t va = (intptr_t)vaddr; |
142 | | - intptr_t pa = (intptr_t)paddr; |
| 142 | + rt_ubase_t va = (rt_ubase_t)vaddr; |
| 143 | + rt_ubase_t pa = (rt_ubase_t)paddr; |
143 | 144 | int level_shift = MMU_ADDRESS_BITS; |
144 | 145 |
|
145 | 146 | if (va & ARCH_PAGE_MASK) |
@@ -345,23 +346,82 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size) |
345 | 346 | } |
346 | 347 | } |
347 | 348 |
|
| 349 | +#ifdef ARCH_USING_ASID |
/**
 * The ASID identifies a particular address space in the TLB.
 * In the best case each address space has its own exclusive ASID. However,
 * ARM only guarantees 8 bits of ID space, which gives us just 254 usable
 * IDs (excluding the 1 ASID reserved for the kernel).
 */
| 356 | + |
| 357 | +static rt_spinlock_t _asid_lock = RT_SPINLOCK_INIT; |
| 358 | + |
| 359 | +rt_uint16_t _aspace_get_asid(rt_aspace_t aspace) |
| 360 | +{ |
| 361 | + static rt_uint16_t _asid_pool = 0; |
| 362 | + rt_uint16_t asid_to, asid_from; |
| 363 | + rt_ubase_t ttbr0_from; |
| 364 | + |
| 365 | + asid_to = aspace->asid; |
| 366 | + if (asid_to == 0) |
| 367 | + { |
| 368 | + rt_spin_lock(&_asid_lock); |
| 369 | + #define MAX_ASID (1ul << MMU_SUPPORTED_ASID_BITS) |
| 370 | + if (_asid_pool && _asid_pool < MAX_ASID) |
| 371 | + { |
| 372 | + asid_to = ++_asid_pool; |
| 373 | + LOG_D("Allocated ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace); |
| 374 | + } |
| 375 | + else |
| 376 | + { |
| 377 | + asid_to = _asid_pool = 1; |
| 378 | + LOG_D("Overflowed ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace); |
| 379 | + } |
| 380 | + |
| 381 | + rt_spin_unlock(&_asid_lock); |
| 382 | + |
| 383 | + aspace->asid = asid_to; |
| 384 | + rt_hw_tlb_invalidate_aspace(aspace); |
| 385 | + } |
| 386 | + |
| 387 | + __asm__ volatile("mrs %0, ttbr0_el1" :"=r"(ttbr0_from)); |
| 388 | + asid_from = ttbr0_from >> MMU_ASID_SHIFT; |
| 389 | + if (asid_from == asid_to) |
| 390 | + { |
| 391 | + LOG_D("Conflict ASID. from %d, to %d", asid_from, asid_to); |
| 392 | + rt_hw_tlb_invalidate_aspace(aspace); |
| 393 | + } |
| 394 | + else |
| 395 | + { |
| 396 | + LOG_D("ASID switched. from %d, to %d", asid_from, asid_to); |
| 397 | + } |
| 398 | + |
| 399 | + return asid_to; |
| 400 | +} |
| 401 | + |
| 402 | +#else |
| 403 | + |
| 404 | + |
| 405 | +rt_uint16_t _aspace_get_asid(rt_aspace_t aspace) |
| 406 | +{ |
| 407 | + rt_hw_tlb_invalidate_all(); |
| 408 | + return 0; |
| 409 | +} |
| 410 | +#endif /* ARCH_USING_ASID */ |
| 411 | + |
| 412 | +#define CREATE_TTBR0(pgtbl, asid) ((rt_ubase_t)(pgtbl) | (rt_ubase_t)(asid) << MMU_ASID_SHIFT) |
348 | 413 | void rt_hw_aspace_switch(rt_aspace_t aspace) |
349 | 414 | { |
350 | 415 | if (aspace != &rt_kernel_space) |
351 | 416 | { |
| 417 | + rt_ubase_t ttbr0; |
352 | 418 | void *pgtbl = aspace->page_table; |
353 | 419 | pgtbl = rt_kmem_v2p(pgtbl); |
354 | | - rt_ubase_t tcr; |
355 | | - |
356 | | - __asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory"); |
357 | 420 |
|
358 | | - __asm__ volatile("mrs %0, tcr_el1" : "=r"(tcr)); |
359 | | - tcr &= ~(1ul << 7); |
360 | | - __asm__ volatile("msr tcr_el1, %0\n" |
361 | | - "isb" ::"r"(tcr) |
362 | | - : "memory"); |
| 421 | + ttbr0 = CREATE_TTBR0(pgtbl, _aspace_get_asid(aspace)); |
363 | 422 |
|
364 | | - rt_hw_tlb_invalidate_all_local(); |
| 423 | + __asm__ volatile("msr ttbr0_el1, %0" ::"r"(ttbr0)); |
| 424 | + __asm__ volatile("isb" ::: "memory"); |
365 | 425 | } |
366 | 426 | } |
367 | 427 |
|
@@ -836,7 +896,7 @@ void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1, |
836 | 896 | { |
837 | 897 | int ret; |
838 | 898 | unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT; |
839 | | - unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM); |
| 899 | + unsigned long normal_attr = MMU_MAP_K_RWCB; |
840 | 900 | extern unsigned char _start; |
841 | 901 | unsigned long va = (unsigned long) &_start - pv_off; |
842 | 902 | va = RT_ALIGN_DOWN(va, 0x200000); |
|
0 commit comments