
Commit 79939e3

dcpleung authored and nashif committed
xtensa: mmu: mpu: add xtensa_mem_kernel_has_access()
This adds a new function xtensa_mem_kernel_has_access() to determine whether a memory region can be accessed by kernel threads. This allows callers to check for a valid mapping before touching memory, instead of relying on page faults to detect invalid access. Also fix an issue with arch_buffer_validate() on MPU where it could report success even when the incoming memory region has no corresponding entry in the MPU table.

Signed-off-by: Daniel Leung <[email protected]>
1 parent 61ec0d1 commit 79939e3
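
The commit message above describes the intended usage pattern. A minimal sketch of a kernel-side caller, under assumptions: copy_if_mapped() and its error handling are hypothetical and not part of this commit, and the xtensa_internal.h include path mirrors the arch-internal header touched below.

#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <xtensa_internal.h>	/* assumed: arch-internal header declaring the new prototype */

/* Hypothetical helper: probe both regions up front instead of relying on
 * a page fault or MPU exception to flag an invalid kernel mapping.
 */
static int copy_if_mapped(void *dst, const void *src, size_t len)
{
	/* write != 0: destination must be writable by kernel threads */
	if (!xtensa_mem_kernel_has_access(dst, len, 1)) {
		return -EFAULT;
	}

	/* write == 0: source only needs to be readable */
	if (!xtensa_mem_kernel_has_access((void *)src, len, 0)) {
		return -EFAULT;
	}

	memcpy(dst, src, len);
	return 0;
}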

File tree

arch/xtensa/core/mpu.c
arch/xtensa/core/ptables.c
arch/xtensa/include/xtensa_internal.h
arch/xtensa/include/xtensa_mpu_priv.h

4 files changed: +137 −2 lines changed

arch/xtensa/core/mpu.c

Lines changed: 97 additions & 0 deletions
@@ -989,6 +989,14 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
 	     offset += XCHAL_MPU_ALIGN) {
 		uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
 
+		if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
+			/* There is no foreground or background entry associated
+			 * with the region.
+			 */
+			ret = -EPERM;
+			goto out;
+		}
+
 		uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
 				>> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
 
@@ -1037,6 +1045,95 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
 	return ret;
 }
 
+bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
+{
+	uintptr_t aligned_addr;
+	size_t aligned_size, addr_offset;
+	bool ret = true;
+
+	/* addr/size arbitrary, fix this up into an aligned region */
+	aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
+	addr_offset = (uintptr_t)addr - aligned_addr;
+	aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
+
+	for (size_t offset = 0; offset < aligned_size;
+	     offset += XCHAL_MPU_ALIGN) {
+		uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
+
+		if ((probed & XTENSA_MPU_PROBE_VALID_ENTRY_MASK) == 0U) {
+			/* There is no foreground or background entry associated
+			 * with the region.
+			 */
+			ret = false;
+			goto out;
+		}
+
+		uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
+				>> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
+
+		if (write != 0) {
+			/* Need to check write permission. */
+			switch (access_rights) {
+			case XTENSA_MPU_ACCESS_P_RW_U_NA:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RWX_U_NA:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_WO_U_WO:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RW_U_RO:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RWX_U_RX:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RW_U_RW:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
+				/* These permissions are okay. */
+				break;
+			default:
+				ret = false;
+				goto out;
+			}
+		} else {
+			/* Only check read permission. */
+			switch (access_rights) {
+			case XTENSA_MPU_ACCESS_P_RO_U_NA:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RX_U_NA:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RW_U_NA:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RWX_U_NA:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RW_U_RWX:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RW_U_RO:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RWX_U_RX:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RO_U_RO:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RX_U_RX:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RW_U_RW:
+				__fallthrough;
+			case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
+				/* These permissions are okay. */
+				break;
+			default:
+				ret = false;
+				goto out;
+			}
+		}
+	}
+
+out:
+	return ret;
+}
+
 void xtensa_user_stack_perms(struct k_thread *thread)
 {
 	int ret;
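
Both arch_buffer_validate() and the new xtensa_mem_kernel_has_access() walk the buffer one XCHAL_MPU_ALIGN-sized chunk at a time after widening it to aligned boundaries. A standalone model of that fix-up arithmetic, assuming a power-of-two alignment and using simplified local macros in place of Zephyr's ROUND_DOWN()/ROUND_UP():

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define MPU_ALIGN 4096U	/* stand-in for XCHAL_MPU_ALIGN; assumed power of two */
#define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1U))
#define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1U, (a))

int main(void)
{
	uintptr_t addr = 0x20000123UL;	/* arbitrary unaligned buffer start */
	size_t size = 0x300;

	uintptr_t aligned_addr = ALIGN_DOWN(addr, MPU_ALIGN);		/* 0x20000000 */
	size_t addr_offset = addr - aligned_addr;			/* 0x123 */
	size_t aligned_size = ALIGN_UP(size + addr_offset, MPU_ALIGN);	/* 0x1000 */

	/* One probe per aligned chunk covering the original buffer. */
	for (size_t offset = 0; offset < aligned_size; offset += MPU_ALIGN) {
		printf("probe 0x%lx\n", (unsigned long)(aligned_addr + offset));
	}

	return 0;
}

Every address in [addr, addr + size) falls inside one of the probed chunks, so a region that straddles an unmapped or under-privileged MPU entry is rejected before it is touched.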

arch/xtensa/core/ptables.c

Lines changed: 11 additions & 2 deletions
@@ -1076,14 +1076,13 @@ static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool w
 	return true;
 }
 
-int arch_buffer_validate(const void *addr, size_t size, int write)
+static int mem_buffer_validate(const void *addr, size_t size, int write, int ring)
 {
 	int ret = 0;
 	uint8_t *virt;
 	size_t aligned_size;
 	const struct k_thread *thread = _current;
 	uint32_t *ptables = thread_page_tables_get(thread);
-	uint8_t ring = XTENSA_MMU_USER_RING;
 
 	/* addr/size arbitrary, fix this up into an aligned region */
 	k_mem_region_align((uintptr_t *)&virt, &aligned_size,
@@ -1100,6 +1099,16 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
 	return ret;
 }
 
+bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
+{
+	return mem_buffer_validate(addr, size, write, XTENSA_MMU_KERNEL_RING) == 0;
+}
+
+int arch_buffer_validate(const void *addr, size_t size, int write)
+{
+	return mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING);
+}
+
 void xtensa_swap_update_page_tables(struct k_thread *incoming)
 {
 	uint32_t *ptables = incoming->arch.ptables;

arch/xtensa/include/xtensa_internal.h

Lines changed: 19 additions & 0 deletions
@@ -55,6 +55,25 @@ void xtensa_userspace_enter(k_thread_entry_t user_entry,
 			    uintptr_t stack_end,
 			    uintptr_t stack_start);
 
+/**
+ * @brief Check if kernel threads have access to a memory region.
+ *
+ * Given a memory region, return whether the current memory management
+ * hardware configuration would allow kernel threads to read/write
+ * that region.
+ *
+ * This is mainly used to make sure kernel has access to avoid relying
+ * on page fault to detect invalid mappings.
+ *
+ * @param addr Start address of the buffer
+ * @param size Size of the buffer
+ * @param write If non-zero, additionally check if the area is writable.
+ *              Otherwise, just check if the memory can be read.
+ *
+ * @return False if the permissions don't match.
+ */
+bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write);
+
 /**
  * @}
  */
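
The @return description above only spells out the failure case; taken together with the two implementations in this commit, the function returns true only when every aligned chunk of the region is mapped with sufficient kernel rights. A hypothetical ztest-style check of that contract (suite name, test body, and the internal include path are illustrative assumptions, not part of this commit):

#include <zephyr/ztest.h>

#include <xtensa_internal.h>	/* assumed: arch-internal header declaring the prototype */

ZTEST(xtensa_mem_access, test_kernel_bss_buffer_is_accessible)
{
	static uint8_t buf[64];	/* kernel .bss buffer: expected to be kernel read/write */

	zassert_true(xtensa_mem_kernel_has_access(buf, sizeof(buf), 0),
		     "kernel buffer not readable");
	zassert_true(xtensa_mem_kernel_has_access(buf, sizeof(buf), 1),
		     "kernel buffer not writable");
}

ZTEST_SUITE(xtensa_mem_access, NULL, NULL, NULL, NULL, NULL);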

arch/xtensa/include/xtensa_mpu_priv.h

Lines changed: 10 additions & 0 deletions
@@ -69,6 +69,16 @@
 #define XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_MASK \
 	(0x1FFU << XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_SHIFT)
 
+/** Bit mask for foreground entry returned by probing. */
+#define XTENSA_MPU_PROBE_IS_FG_ENTRY_MASK	BIT(31)
+
+/** Bit mask for background entry returned by probing. */
+#define XTENSA_MPU_PROBE_IS_BG_ENTRY_MASK	BIT(30)
+
+/** Bit mask used to determine if entry is valid returned by probing. */
+#define XTENSA_MPU_PROBE_VALID_ENTRY_MASK \
+	(XTENSA_MPU_PROBE_IS_FG_ENTRY_MASK | XTENSA_MPU_PROBE_IS_BG_ENTRY_MASK)
+
 /**
  * @}
  */
