27 changes: 2 additions & 25 deletions libcpu/Kconfig
@@ -1,29 +1,5 @@
if ARCH_ARMV8 && ARCH_CPU_64BIT
menu "AArch64 Architecture Configuration"
config ARCH_TEXT_OFFSET
hex "Text offset"
default 0x200000
config ARCH_RAM_OFFSET
hex "RAM offset"
default 0
config ARCH_SECONDARY_CPU_STACK_SIZE
int "Secondary CPU stack size"
default 4096
config ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
default y
config ARCH_USING_GENERIC_CPUID
bool "Using generic cpuid implemenation"
select ARCH_USING_HW_THREAD_SELF
default y if RT_USING_OFW
default n
config ARCH_HEAP_SIZE
hex "Size of system heap"
default 0x4000000
config ARCH_INIT_PAGE_SIZE
hex "Size of init page region"
default 0x200000
endmenu
orsource "./aarch64/Kconfig"
endif

config ARCH_CPU_64BIT
@@ -219,6 +195,7 @@ config ARCH_ARMV8
select ARCH_ARM
select ARCH_ARM_MMU
select RT_USING_CPU_FFS
select ARCH_USING_ASID
select ARCH_USING_IRQ_CTX_LIST

config ARCH_MIPS
25 changes: 25 additions & 0 deletions libcpu/aarch64/Kconfig
@@ -0,0 +1,25 @@
menu "AArch64 Architecture Configuration"
config ARCH_TEXT_OFFSET
hex "Text offset"
default 0x200000
config ARCH_RAM_OFFSET
hex "RAM offset"
default 0
config ARCH_SECONDARY_CPU_STACK_SIZE
int "Secondary CPU stack size"
default 4096
config ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
default y
config ARCH_USING_GENERIC_CPUID
bool "Using generic cpuid implemenation"
select ARCH_USING_HW_THREAD_SELF
default y if RT_USING_OFW
default n
config ARCH_HEAP_SIZE
hex "Size of system heap"
default 0x4000000
config ARCH_INIT_PAGE_SIZE
hex "Size of init page region"
default 0x200000
endmenu
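RT-Thread's build normally turns each enabled Kconfig symbol into a macro of the same name in rtconfig.h. A sketch of what the defaults above would generate, assuming that convention (actual values come from the board configuration):

/* rtconfig.h (sketch, defaults taken from the menu above) */
#define ARCH_TEXT_OFFSET 0x200000
#define ARCH_RAM_OFFSET 0
#define ARCH_SECONDARY_CPU_STACK_SIZE 4096
#define ARCH_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define ARCH_USING_GENERIC_CPUID      /* defaults to enabled when RT_USING_OFW is set */
#define ARCH_USING_HW_THREAD_SELF     /* selected by ARCH_USING_GENERIC_CPUID */
#define ARCH_HEAP_SIZE 0x4000000
#define ARCH_INIT_PAGE_SIZE 0x200000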
30 changes: 18 additions & 12 deletions libcpu/aarch64/common/include/mmu.h
@@ -41,32 +41,38 @@ struct mem_desc
#define RT_HW_MMU_PROT_USER 16
#define RT_HW_MMU_PROT_CACHE 32

#define MMU_ASID_SHIFT 48
#define MMU_NG_SHIFT 11 /* not global bit */
#define MMU_AF_SHIFT 10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT 6
#define MMU_MA_SHIFT 2
#define MMU_AP_MASK (0x3 << MMU_AP_SHIFT)

/* we don't support feature detection for now, so fall back to 8-bit ASIDs */
#define MMU_SUPPORTED_ASID_BITS 8

#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */
#define MMU_ATTR_AF (1ul << MMU_AF_SHIFT) /* the access flag */
#define MMU_ATTR_DBM (1ul << 51) /* the dirty bit modifier */

#define MMU_MAP_CUSTOM(ap, mtype) \
#define MMU_MAP_CUSTOM(ap, mtype, nglobal) \
((0x1UL << MMU_AF_SHIFT) | (0x2UL << MMU_SHARED_SHIFT) | \
((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT))
#define MMU_MAP_K_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM)
#define MMU_MAP_K_RO MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM)
#define MMU_MAP_K_RW MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM)
#define MMU_MAP_U_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM)
#define MMU_MAP_U_RO MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM)
#define MMU_MAP_U_RW MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM)
((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT) | \
((rt_ubase_t)(nglobal) << MMU_NG_SHIFT))
#define MMU_MAP_K_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM, 0)
#define MMU_MAP_K_RO MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_NOCACHE_MEM, 0)
#define MMU_MAP_K_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM, 0)
#define MMU_MAP_K_RW MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM, 0)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM, 0)
#define MMU_MAP_U_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM, 1)
#define MMU_MAP_U_RO MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM, 1)
#define MMU_MAP_U_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM, 1)
#define MMU_MAP_U_RW MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM, 1)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM, 1)
#define MMU_MAP_TRACE(attr) ((attr) & ~(MMU_ATTR_AF | MMU_ATTR_DBM))

#define ARCH_SECTION_SHIFT 21
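The key change in these attribute macros is the new nG (not-global) bit at descriptor bit 11: user mappings now get nG=1 so their TLB entries are tagged with the current ASID, while kernel mappings stay global. A minimal sketch of that invariant, assuming mmu.h is on the include path and using RT_ASSERT from rtthread.h:

/* Sketch only: user attributes carry nG (per-ASID), kernel attributes do not. */
#include <rtthread.h>
#include <mmu.h>

static void check_ng_bit(void)
{
    rt_ubase_t ng = 1ul << MMU_NG_SHIFT;    /* descriptor bit 11 */

    RT_ASSERT((MMU_MAP_U_RWCB & ng) != 0);  /* user mapping: TLB entry tagged by ASID */
    RT_ASSERT((MMU_MAP_K_RWCB & ng) == 0);  /* kernel mapping: global across ASIDs */
}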
15 changes: 13 additions & 2 deletions libcpu/aarch64/common/include/tlb.h
@@ -18,9 +18,9 @@

#define TLBI_ARG(addr, asid) \
({ \
uintptr_t arg = (uintptr_t)(addr) >> 12; \
rt_ubase_t arg = (rt_ubase_t)(addr) >> ARCH_PAGE_SHIFT; \
arg &= (1ull << 44) - 1; \
arg |= (uintptr_t)(asid) << 48; \
arg |= (rt_ubase_t)(asid) << MMU_ASID_SHIFT; \
(void *)arg; \
})

@@ -50,7 +50,18 @@ static inline void rt_hw_tlb_invalidate_all_local(void)

static inline void rt_hw_tlb_invalidate_aspace(rt_aspace_t aspace)
{
#ifdef ARCH_USING_ASID
__asm__ volatile(
// ensure updates to the PTEs have completed
"dsb nshst\n"
"tlbi aside1is, %0\n"
"dsb nsh\n"
// refresh the instruction stream after the TLB change
"isb\n" ::"r"(TLBI_ARG(0ul, aspace->asid))
: "memory");
#else
rt_hw_tlb_invalidate_all();
#endif
}

static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
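TLBI_ARG follows the AArch64 TLBI operand layout: the page number (VA >> 12 with a 4 KiB granule) in bits [43:0] and the ASID in bits [63:48], which is what both the by-VA and by-ASID invalidations above consume. A worked example under those assumptions (values are purely illustrative):

/* Sketch: packing a user VA and an ASID into a TLBI operand.
 * Assumes ARCH_PAGE_SHIFT == 12 and MMU_ASID_SHIFT == 48, as in the headers above. */
void *op = TLBI_ARG((void *)0x401000ul, 42);
/* (rt_ubase_t)op == ((rt_ubase_t)42 << 48) | (0x401000ul >> 12)
 *                == 0x002a000000000401 */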
86 changes: 73 additions & 13 deletions libcpu/aarch64/common/mmu.c
@@ -8,10 +8,11 @@
* 2012-01-10 bernard porting to AM1808
* 2021-11-28 GuEe-GUI first version
* 2022-12-10 WangXiaoyao porting to MM
* 2024-07-08 Shell added support for ASID
*/

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_LOG
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rthw.h>
@@ -138,8 +139,8 @@ static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsi
unsigned long *cur_lv_tbl = lv0_tbl;
unsigned long page;
unsigned long off;
intptr_t va = (intptr_t)vaddr;
intptr_t pa = (intptr_t)paddr;
rt_ubase_t va = (rt_ubase_t)vaddr;
rt_ubase_t pa = (rt_ubase_t)paddr;
int level_shift = MMU_ADDRESS_BITS;

if (va & ARCH_PAGE_MASK)
@@ -345,23 +346,82 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
}
}

#ifdef ARCH_USING_ASID
/**
* The ASID identifies a particular address space in the TLB.
* In the best case, each address space has its own exclusive ASID. However,
* ARM only guarantees 8 bits of ID space, which gives us only 254 usable IDs
* (besides the one reserved for the kernel).
*/

static rt_spinlock_t _asid_lock = RT_SPINLOCK_INIT;

rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
static rt_uint16_t _asid_pool = 0;
rt_uint16_t asid_to, asid_from;
rt_ubase_t ttbr0_from;

asid_to = aspace->asid;
if (asid_to == 0)
{
rt_spin_lock(&_asid_lock);
#define MAX_ASID (1ul << MMU_SUPPORTED_ASID_BITS)
if (_asid_pool && _asid_pool < MAX_ASID)
{
asid_to = ++_asid_pool;
LOG_D("Allocated ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
}
else
{
asid_to = _asid_pool = 1;
LOG_D("Overflowed ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
}

rt_spin_unlock(&_asid_lock);

aspace->asid = asid_to;
rt_hw_tlb_invalidate_aspace(aspace);
}

__asm__ volatile("mrs %0, ttbr0_el1" :"=r"(ttbr0_from));
asid_from = ttbr0_from >> MMU_ASID_SHIFT;
if (asid_from == asid_to)
{
LOG_D("Conflict ASID. from %d, to %d", asid_from, asid_to);
rt_hw_tlb_invalidate_aspace(aspace);
}
else
{
LOG_D("ASID switched. from %d, to %d", asid_from, asid_to);
}

return asid_to;
}

#else


rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
rt_hw_tlb_invalidate_all();
return 0;
}
#endif /* ARCH_USING_ASID */

#define CREATE_TTBR0(pgtbl, asid) ((rt_ubase_t)(pgtbl) | (rt_ubase_t)(asid) << MMU_ASID_SHIFT)
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
if (aspace != &rt_kernel_space)
{
rt_ubase_t ttbr0;
void *pgtbl = aspace->page_table;
pgtbl = rt_kmem_v2p(pgtbl);
rt_ubase_t tcr;

__asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory");

__asm__ volatile("mrs %0, tcr_el1" : "=r"(tcr));
tcr &= ~(1ul << 7);
__asm__ volatile("msr tcr_el1, %0\n"
"isb" ::"r"(tcr)
: "memory");
ttbr0 = CREATE_TTBR0(pgtbl, _aspace_get_asid(aspace));

rt_hw_tlb_invalidate_all_local();
__asm__ volatile("msr ttbr0_el1, %0" ::"r"(ttbr0));
__asm__ volatile("isb" ::: "memory");
}
}

@@ -836,7 +896,7 @@ void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
{
int ret;
unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);
unsigned long normal_attr = MMU_MAP_K_RWCB;
extern unsigned char _start;
unsigned long va = (unsigned long) &_start - pv_off;
va = RT_ALIGN_DOWN(va, 0x200000);
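Taken together, the switch path now packs the page-table base and the ASID into a single TTBR0 write, so switching between address spaces normally needs no TLB flush; flushes happen only when an ASID is first assigned, when the pool wraps, or when the new ASID collides with the one currently in TTBR0. A condensed sketch of that flow (not the patch verbatim; the wrapper name is illustrative):

/* Sketch of the switch sequence, using the helpers shown in this diff. */
static void aspace_switch_sketch(rt_aspace_t aspace)
{
    void *pgtbl = rt_kmem_v2p(aspace->page_table);           /* physical base for TTBR0 */
    rt_uint16_t asid = _aspace_get_asid(aspace);              /* allocate or reuse; flushes on wrap/conflict */
    rt_ubase_t ttbr0 = (rt_ubase_t)pgtbl |
                       ((rt_ubase_t)asid << MMU_ASID_SHIFT);  /* CREATE_TTBR0(pgtbl, asid) */

    __asm__ volatile("msr ttbr0_el1, %0\n"
                     "isb" ::"r"(ttbr0) : "memory");

    /* No unconditional tlbi here: entries belonging to other address spaces stay
     * in the TLB, keyed by their own ASIDs. */
}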