Skip to content

Commit a651862

Browse files
Flavio Ceolin
authored and carlescufi committed
xtensa: Enable userspace
Userspace support for the Xtensa architecture using the Xtensa MMU. Some considerations: - Syscalls are not inline functions like in other architectures because of compiler issues when using multiple registers to pass parameters to the syscall. So here we use a function call so we can use registers as we need. - TLS is not supported by xcc on Xtensa, and reading the PS register is a privileged instruction. So, we have to use threadptr to know if a thread is a user mode thread. Signed-off-by: Flavio Ceolin <[email protected]> Signed-off-by: Daniel Leung <[email protected]>
1 parent fff91cb commit a651862

File tree

19 files changed

+1646
-68
lines changed

19 files changed

+1646
-68
lines changed

arch/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,6 +125,7 @@ config XTENSA
125125
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
126126
select ARCH_HAS_CODE_DATA_RELOCATION
127127
select ARCH_HAS_TIMING_FUNCTIONS
128+
select ARCH_MEM_DOMAIN_DATA if USERSPACE
128129
imply ATOMIC_OPERATIONS_ARCH
129130
help
130131
Xtensa architecture

arch/xtensa/Kconfig

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,7 @@ config XTENSA_MMU
113113
bool "Xtensa MMU Support"
114114
default n
115115
select MMU
116+
select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
116117
select XTENSA_SMALL_VECTOR_TABLE_ENTRY
117118
select KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK if XTENSA_RPO_CACHE
118119
help
@@ -144,8 +145,18 @@ if XTENSA_MMU
144145
The bit shift number for the virtual address for Xtensa
145146
page table (PTEVADDR).
146147

148+
config XTENSA_MMU_NUM_L1_TABLES
149+
int "Number of L1 page tables"
150+
default 1 if !USERSPACE
151+
default 4
152+
help
153+
This option specifies the maximum number of translation tables.
154+
Translation tables are directly related to the number of
155+
memory domains in the target, considering the kernel itself requires one.
156+
147157
config XTENSA_MMU_NUM_L2_TABLES
148158
int "Number of L2 page tables"
159+
default 20 if USERSPACE
149160
default 10
150161
help
151162
Each table can address up to 4MB memory address.
@@ -159,6 +170,15 @@ if XTENSA_MMU
159170

160171
endif # XTENSA_MMU
161172

173+
config XTENSA_SYSCALL_USE_HELPER
174+
bool "Use userspace syscall helper"
175+
default y if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "xcc-clang"
176+
depends on USERSPACE
177+
help
178+
Use syscall helpers for passing more than 3 arguments.
179+
This is a workaround for toolchains that have
180+
issues modeling register usage.
181+
162182
endif # CPU_HAS_MMU
163183

164184
endmenu

arch/xtensa/core/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@ zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
2222
zephyr_library_sources_ifdef(CONFIG_TIMING_FUNCTIONS timing.c)
2323
zephyr_library_sources_ifdef(CONFIG_GDBSTUB gdbstub.c)
2424
zephyr_library_sources_ifdef(CONFIG_XTENSA_MMU xtensa_mmu.c)
25+
zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
26+
zephyr_library_sources_ifdef(CONFIG_XTENSA_SYSCALL_USE_HELPER syscall_helper.c)
2527

2628
zephyr_library_sources_ifdef(
2729
CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK

arch/xtensa/core/fatal.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#endif
1616
#endif
1717
#include <zephyr/debug/coredump.h>
18+
#include <zephyr/arch/common/exc_handle.h>
1819
#include <zephyr/logging/log.h>
1920
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
2021

@@ -120,6 +121,14 @@ void z_xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
120121
z_fatal_error(reason, esf);
121122
}
122123

124+
#ifdef CONFIG_USERSPACE
125+
Z_EXC_DECLARE(z_xtensa_user_string_nlen);
126+
127+
static const struct z_exc_handle exceptions[] = {
128+
Z_EXC_HANDLE(z_xtensa_user_string_nlen)
129+
};
130+
#endif /* CONFIG_USERSPACE */
131+
123132
#ifdef XT_SIMULATOR
124133
void exit(int return_code)
125134
{

arch/xtensa/core/include/xtensa_mmu_priv.h

Lines changed: 81 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,18 +18,37 @@
1818
#define Z_XTENSA_PTE_VPN_MASK 0xFFFFF000U
1919
#define Z_XTENSA_PTE_PPN_MASK 0xFFFFF000U
2020
#define Z_XTENSA_PTE_ATTR_MASK 0x0000000FU
21+
#define Z_XTENSA_PTE_ATTR_CACHED_MASK 0x0000000CU
2122
#define Z_XTENSA_L1_MASK 0x3FF00000U
2223
#define Z_XTENSA_L2_MASK 0x3FFFFFU
2324

2425
#define Z_XTENSA_PPN_SHIFT 12U
2526

2627
#define Z_XTENSA_PTE_RING_MASK 0x00000030U
28+
#define Z_XTENSA_PTE_RING_SHIFT 4U
2729

2830
#define Z_XTENSA_PTE(paddr, ring, attr) \
2931
(((paddr) & Z_XTENSA_PTE_PPN_MASK) | \
30-
(((ring) << 4) & Z_XTENSA_PTE_RING_MASK) | \
32+
(((ring) << Z_XTENSA_PTE_RING_SHIFT) & Z_XTENSA_PTE_RING_MASK) | \
3133
((attr) & Z_XTENSA_PTE_ATTR_MASK))
3234

35+
#define Z_XTENSA_PTE_ATTR_GET(pte) \
36+
(pte) & Z_XTENSA_PTE_ATTR_MASK
37+
38+
#define Z_XTENSA_PTE_ATTR_SET(pte, attr) \
39+
(((pte) & ~Z_XTENSA_PTE_ATTR_MASK) | (attr))
40+
41+
#define Z_XTENSA_PTE_RING_SET(pte, ring) \
42+
(((pte) & ~Z_XTENSA_PTE_RING_MASK) | \
43+
((ring) << Z_XTENSA_PTE_RING_SHIFT))
44+
45+
#define Z_XTENSA_PTE_RING_GET(pte) \
46+
(((pte) & ~Z_XTENSA_PTE_RING_MASK) >> Z_XTENSA_PTE_RING_SHIFT)
47+
48+
#define Z_XTENSA_PTE_ASID_GET(pte, rasid) \
49+
(((rasid) >> ((((pte) & Z_XTENSA_PTE_RING_MASK) \
50+
>> Z_XTENSA_PTE_RING_SHIFT) * 8)) & 0xFF)
51+
3352
#define Z_XTENSA_TLB_ENTRY(vaddr, way) \
3453
(((vaddr) & Z_XTENSA_PTE_PPN_MASK) | (way))
3554

@@ -38,11 +57,38 @@
3857
(((vaddr) >> Z_XTENSA_PPN_SHIFT) & 0x03U))
3958

4059
#define Z_XTENSA_L2_POS(vaddr) \
41-
(((vaddr) & Z_XTENSA_L2_MASK) >> Z_XTENSA_PPN_SHIFT)
60+
(((vaddr) & Z_XTENSA_L2_MASK) >> 12U)
61+
62+
#define Z_XTENSA_L1_POS(vaddr) \
63+
((vaddr) >> 22U)
64+
65+
/* PTE attributes for entries in the L1 page table. Should never be
66+
* writable, may be cached in non-SMP contexts only
67+
*/
68+
#if CONFIG_MP_MAX_NUM_CPUS == 1
69+
#define Z_XTENSA_PAGE_TABLE_ATTR Z_XTENSA_MMU_CACHED_WB
70+
#else
71+
#define Z_XTENSA_PAGE_TABLE_ATTR 0
72+
#endif
73+
74+
/* This ASID is shared between all domains and kernel. */
75+
#define Z_XTENSA_MMU_SHARED_ASID 255
76+
77+
/* Fixed data TLB way to map the page table */
78+
#define Z_XTENSA_MMU_PTE_WAY 7
79+
80+
/* Fixed data TLB way to map the vecbase */
81+
#define Z_XTENSA_MMU_VECBASE_WAY 8
4282

4383
/* Kernel specific ASID. Ring field in the PTE */
4484
#define Z_XTENSA_KERNEL_RING 0
4585

86+
/* User specific ASID. Ring field in the PTE */
87+
#define Z_XTENSA_USER_RING 2
88+
89+
/* Ring value for MMU_SHARED_ASID */
90+
#define Z_XTENSA_SHARED_RING 3
91+
4692
/* Number of data TLB ways [0-9] */
4793
#define Z_XTENSA_DTLB_WAYS 10
4894

@@ -96,6 +142,14 @@
96142
#define Z_XTENSA_PAGE_TABLE_VADDR \
97143
Z_XTENSA_PTE_ENTRY_VADDR(Z_XTENSA_PTEVADDR)
98144

145+
/*
146+
* Get asid for a given ring from rasid register.
147+
* rasid contains four asid, one per ring.
148+
*/
149+
150+
#define Z_XTENSA_RASID_ASID_GET(rasid, ring) \
151+
(((rasid) >> ((ring) * 8)) & 0xff)
152+
99153
static ALWAYS_INLINE void xtensa_rasid_set(uint32_t rasid)
100154
{
101155
__asm__ volatile("wsr %0, rasid\n\t"
@@ -110,6 +164,16 @@ static ALWAYS_INLINE uint32_t xtensa_rasid_get(void)
110164
return rasid;
111165
}
112166

167+
/*
 * Set the ASID for one ring in the RASID register, leaving the other
 * three ring ASIDs untouched.
 *
 * RASID packs four 8-bit ASIDs, one per ring; byte @pos holds the ASID
 * for ring @pos.
 *
 * @param asid ASID value to install.
 * @param pos  Ring number (byte position, 0-3) to update.
 */
static ALWAYS_INLINE void xtensa_rasid_asid_set(uint8_t asid, uint8_t pos)
{
	uint32_t rasid = xtensa_rasid_get();

	/* Use an unsigned constant for the byte mask: 0xff << 24 on a
	 * signed int would shift into the sign bit, which is undefined
	 * behavior in C (C11 6.5.7).
	 */
	rasid = (rasid & ~(0xffU << (pos * 8))) | ((uint32_t)asid << (pos * 8));

	xtensa_rasid_set(rasid);
}
175+
176+
113177
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate(uint32_t entry)
114178
{
115179
__asm__ volatile("iitlb %0\n\t"
@@ -201,6 +265,21 @@ static ALWAYS_INLINE void xtensa_ptevaddr_set(void *ptables)
201265
__asm__ volatile("wsr.ptevaddr %0" : : "a"((uint32_t)ptables));
202266
}
203267

268+
/**
269+
* @brief Get the current page tables.
270+
*
271+
* The page table address is obtained by reading the PTEVADDR register.
272+
*
273+
* @return ptables The page tables address (virtual address)
274+
*/
275+
/*
 * Return the (virtual) base address of the current page tables, as
 * held in the PTEVADDR special register.
 */
static ALWAYS_INLINE void *xtensa_ptevaddr_get(void)
{
	uint32_t ptevaddr;

	__asm__ volatile("rsr.ptevaddr %0" : "=a" (ptevaddr));

	return (void *)ptevaddr;
}
204283
/*
205284
* The following functions are helpful when debugging.
206285
*/

arch/xtensa/core/offsets/offsets.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55

66
#include <gen_offset.h>
77
#include <kernel_offsets.h>
8+
#include <zephyr/arch/xtensa/thread.h>
89

910
#include <xtensa-asm2-context.h>
1011

@@ -60,4 +61,10 @@ GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu14);
6061
GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu15);
6162
#endif
6263

64+
#ifdef CONFIG_USERSPACE
65+
GEN_OFFSET_SYM(_thread_arch_t, psp);
66+
GEN_OFFSET_SYM(_thread_arch_t, ptables);
67+
#endif
68+
69+
6370
GEN_ABS_SYM_END

arch/xtensa/core/syscall_helper.c

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
/*
2+
* Copyright (c) 2022 Intel Corporation.
3+
*
4+
* SPDX-License-Identifier: Apache-2.0
5+
*/
6+
7+
#include <zephyr/arch/xtensa/syscall.h>
8+
9+
/*
 * Invoke a system call with six arguments.
 *
 * The call ID is pinned to a2 (which also carries the return value);
 * the arguments are pinned to a6, a3, a4, a5, a8 and a9 — the
 * registers the kernel-side syscall dispatcher reads.  Register
 * variables are used because the compiler cannot be relied upon to
 * model this register usage from plain function parameters.
 *
 * @return Value the kernel leaves in a2.
 */
uintptr_t arch_syscall_invoke6_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t arg4,
				      uintptr_t arg5, uintptr_t arg6,
				      uintptr_t call_id)
{
	register uintptr_t ret __asm__("%a2") = call_id;
	register uintptr_t r1 __asm__("%a6") = arg1;
	register uintptr_t r2 __asm__("%a3") = arg2;
	register uintptr_t r3 __asm__("%a4") = arg3;
	register uintptr_t r4 __asm__("%a5") = arg4;
	register uintptr_t r5 __asm__("%a8") = arg5;
	register uintptr_t r6 __asm__("%a9") = arg6;

	__asm__ volatile("syscall\n\t"
			 : "=r" (ret)
			 : "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r5), "r" (r6)
			 : "memory");

	return ret;
}
30+
31+
/*
 * Invoke a system call with five arguments.
 *
 * Call ID in a2 (doubles as the return register); arguments pinned to
 * a6, a3, a4, a5 and a8 for the kernel-side dispatcher.
 *
 * @return Value the kernel leaves in a2.
 */
uintptr_t arch_syscall_invoke5_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t arg4,
				      uintptr_t arg5, uintptr_t call_id)
{
	register uintptr_t ret __asm__("%a2") = call_id;
	register uintptr_t r1 __asm__("%a6") = arg1;
	register uintptr_t r2 __asm__("%a3") = arg2;
	register uintptr_t r3 __asm__("%a4") = arg3;
	register uintptr_t r4 __asm__("%a5") = arg4;
	register uintptr_t r5 __asm__("%a8") = arg5;

	__asm__ volatile("syscall\n\t"
			 : "=r" (ret)
			 : "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4), "r" (r5)
			 : "memory");

	return ret;
}
50+
51+
/*
 * Invoke a system call with four arguments.
 *
 * Call ID in a2 (doubles as the return register); arguments pinned to
 * a6, a3, a4 and a5 for the kernel-side dispatcher.
 *
 * @return Value the kernel leaves in a2.
 */
uintptr_t arch_syscall_invoke4_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t arg4,
				      uintptr_t call_id)
{
	register uintptr_t ret __asm__("%a2") = call_id;
	register uintptr_t r1 __asm__("%a6") = arg1;
	register uintptr_t r2 __asm__("%a3") = arg2;
	register uintptr_t r3 __asm__("%a4") = arg3;
	register uintptr_t r4 __asm__("%a5") = arg4;

	__asm__ volatile("syscall\n\t"
			 : "=r" (ret)
			 : "r" (ret), "r" (r1), "r" (r2), "r" (r3),
			   "r" (r4)
			 : "memory");

	return ret;
}
69+
70+
/*
 * Invoke a system call with three arguments.
 *
 * Call ID in a2 (doubles as the return register); arguments pinned to
 * a6, a3 and a4 for the kernel-side dispatcher.
 *
 * @return Value the kernel leaves in a2.
 */
uintptr_t arch_syscall_invoke3_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t call_id)
{
	register uintptr_t ret __asm__("%a2") = call_id;
	register uintptr_t r1 __asm__("%a6") = arg1;
	register uintptr_t r2 __asm__("%a3") = arg2;
	register uintptr_t r3 __asm__("%a4") = arg3;

	__asm__ volatile("syscall\n\t"
			 : "=r" (ret)
			 : "r" (ret), "r" (r1), "r" (r2), "r" (r3)
			 : "memory");

	return ret;
}
85+
86+
/*
 * Invoke a system call with two arguments.
 *
 * Call ID in a2 (doubles as the return register); arguments pinned to
 * a6 and a3 for the kernel-side dispatcher.
 *
 * @return Value the kernel leaves in a2.
 */
uintptr_t arch_syscall_invoke2_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t call_id)
{
	register uintptr_t ret __asm__("%a2") = call_id;
	register uintptr_t r1 __asm__("%a6") = arg1;
	register uintptr_t r2 __asm__("%a3") = arg2;

	__asm__ volatile("syscall\n\t"
			 : "=r" (ret)
			 : "r" (ret), "r" (r1), "r" (r2)
			 : "memory");

	return ret;
}
100+
101+
/*
 * Invoke a system call with one argument.
 *
 * Call ID in a2 (doubles as the return register); the argument is
 * pinned to a6 for the kernel-side dispatcher.
 *
 * @return Value the kernel leaves in a2.
 */
uintptr_t arch_syscall_invoke1_helper(uintptr_t arg1, uintptr_t call_id)
{
	register uintptr_t ret __asm__("%a2") = call_id;
	register uintptr_t r1 __asm__("%a6") = arg1;

	__asm__ volatile("syscall\n\t"
			 : "=r" (ret)
			 : "r" (ret), "r" (r1)
			 : "memory");

	return ret;
}
113+
114+
/*
 * Invoke a system call with no arguments.
 *
 * Call ID in a2, which also carries the kernel's return value.
 *
 * @return Value the kernel leaves in a2.
 */
uintptr_t arch_syscall_invoke0_helper(uintptr_t call_id)
{
	register uintptr_t ret __asm__("%a2") = call_id;

	__asm__ volatile("syscall\n\t"
			 : "=r" (ret)
			 : "r" (ret)
			 : "memory");

	return ret;
}

0 commit comments

Comments
 (0)