Skip to content

Commit 1cd7ccc

Browse files
dcpleung authored and nashif committed
kernel: mem_domain: arch_mem_domain functions to return errors
This changes the arch_mem_domain_*() functions to return errors. This allows the callers a chance to recover if needed. Note that: () For assertions where it can bail out early without side effects, these are converted to CHECKIF(). (Usually means that updating of page tables or translation tables has not been started yet.) () Other assertions are retained to signal fatal errors during development. () The additional CHECKIF() are structured so that it will bail early if possible. If errors are encountered inside a loop, it will still continue with the loop so it works as before this changes with assertions disabled. Signed-off-by: Daniel Leung <[email protected]>
1 parent bb595a8 commit 1cd7ccc

File tree

6 files changed

+429
-160
lines changed

6 files changed

+429
-160
lines changed

arch/arm64/core/cortex_r/arm_mpu.c

Lines changed: 94 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
#include <arch/arm64/mm.h>
1313
#include <linker/linker-defs.h>
1414
#include <logging/log.h>
15+
#include <sys/check.h>
1516

1617
LOG_MODULE_REGISTER(mpu, CONFIG_MPU_LOG_LEVEL);
1718

@@ -247,8 +248,13 @@ static int dynamic_areas_init(uintptr_t start, size_t size)
247248
static int dup_dynamic_regions(struct dynamic_region_info *dst, int len)
248249
{
249250
size_t i;
251+
int ret = sys_dyn_regions_num;
250252

251-
__ASSERT(sys_dyn_regions_num < len, "system dynamic region nums too large.");
253+
CHECKIF(!(sys_dyn_regions_num < len)) {
254+
LOG_ERR("system dynamic region nums too large.");
255+
ret = -EINVAL;
256+
goto out;
257+
}
252258

253259
for (i = 0; i < sys_dyn_regions_num; i++) {
254260
dst[i] = sys_dyn_regions[i];
@@ -257,7 +263,8 @@ static int dup_dynamic_regions(struct dynamic_region_info *dst, int len)
257263
dst[i].index = -1;
258264
}
259265

260-
return sys_dyn_regions_num;
266+
out:
267+
return ret;
261268
}
262269

263270
static void set_region(struct arm_mpu_region *region,
@@ -283,10 +290,10 @@ static int get_underlying_region_idx(struct dynamic_region_info *dyn_regions,
283290
return -1;
284291
}
285292

286-
static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
287-
uint8_t region_idx, uint8_t region_num,
288-
uintptr_t start, size_t size,
289-
struct arm_mpu_region_attr *attr)
293+
static int insert_region(struct dynamic_region_info *dyn_regions,
294+
uint8_t region_idx, uint8_t region_num,
295+
uintptr_t start, size_t size,
296+
struct arm_mpu_region_attr *attr)
290297
{
291298

292299
/* base: inclusive, limit: exclusive */
@@ -297,15 +304,23 @@ static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
297304
uint64_t u_base;
298305
uint64_t u_limit;
299306
struct arm_mpu_region_attr *u_attr;
300-
301-
302-
__ASSERT(region_idx < region_num,
303-
"Out-of-bounds error for dynamic region map. region idx: %d, region num: %d",
304-
region_idx, region_num);
307+
int ret = 0;
308+
309+
CHECKIF(!(region_idx < region_num)) {
310+
LOG_ERR("Out-of-bounds error for dynamic region map. "
311+
"region idx: %d, region num: %d",
312+
region_idx, region_num);
313+
ret = -EINVAL;
314+
goto out;
315+
}
305316

306317
u_idx = get_underlying_region_idx(dyn_regions, region_idx, base, limit);
307318

308-
__ASSERT(u_idx >= 0, "Invalid underlying region index");
319+
CHECKIF(!(u_idx >= 0)) {
320+
LOG_ERR("Invalid underlying region index");
321+
ret = -ENOENT;
322+
goto out;
323+
}
309324

310325
/* Get underlying region range and attr */
311326
u_region = &(dyn_regions[u_idx].region_conf);
@@ -339,13 +354,18 @@ static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
339354
region_idx++;
340355
}
341356

342-
return region_idx;
357+
ret = region_idx;
358+
359+
out:
360+
return ret;
343361
}
344362

345363
static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
346364
uint8_t region_num)
347365
{
348366
int reg_avail_idx = static_regions_num;
367+
int ret = 0;
368+
349369
/*
350370
* Clean the dynamic regions
351371
*/
@@ -371,16 +391,20 @@ static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
371391
if (region_idx < 0) {
372392
region_idx = reg_avail_idx++;
373393
}
374-
__ASSERT(region_idx < get_num_regions(),
375-
"Out-of-bounds error for mpu regions. region idx: %d, total mpu regions: %d",
376-
region_idx, get_num_regions());
394+
CHECKIF(!(region_idx < get_num_regions())) {
395+
LOG_ERR("Out-of-bounds error for mpu regions. "
396+
"region idx: %d, total mpu regions: %d",
397+
region_idx, get_num_regions());
398+
ret = -ENOENT;
399+
}
400+
377401
region_init(region_idx, &(dyn_regions[i].region_conf));
378402
}
379403

380-
return 0;
404+
return ret;
381405
}
382406

383-
static void configure_dynamic_mpu_regions(struct k_thread *thread)
407+
static int configure_dynamic_mpu_regions(struct k_thread *thread)
384408
{
385409
/*
386410
* Allocate double space for dyn_regions. Because when split
@@ -390,8 +414,15 @@ static void configure_dynamic_mpu_regions(struct k_thread *thread)
390414
struct dynamic_region_info dyn_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM * 2];
391415
const uint8_t max_region_num = ARRAY_SIZE(dyn_regions);
392416
uint8_t region_num;
417+
int ret = 0, ret2;
393418

394-
region_num = dup_dynamic_regions(dyn_regions, max_region_num);
419+
ret2 = dup_dynamic_regions(dyn_regions, max_region_num);
420+
CHECKIF(ret2 < 0) {
421+
ret = ret2;
422+
goto out;
423+
}
424+
425+
region_num = (uint8_t)ret2;
395426

396427
struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
397428

@@ -409,29 +440,42 @@ static void configure_dynamic_mpu_regions(struct k_thread *thread)
409440
}
410441
LOG_DBG("set region 0x%lx 0x%lx",
411442
partition->start, partition->size);
412-
region_num = insert_region(dyn_regions,
413-
region_num,
414-
max_region_num,
415-
partition->start,
416-
partition->size,
417-
&partition->attr);
443+
ret2 = insert_region(dyn_regions,
444+
region_num,
445+
max_region_num,
446+
partition->start,
447+
partition->size,
448+
&partition->attr);
449+
CHECKIF(ret2 != 0) {
450+
ret = ret2;
451+
}
452+
453+
region_num = (uint8_t)ret2;
418454
}
419455
}
420456

421457
LOG_DBG("configure user thread %p's context", thread);
422458
if ((thread->base.user_options & K_USER) != 0) {
423459
/* K_USER thread stack needs a region */
424-
region_num = insert_region(dyn_regions,
425-
region_num,
426-
max_region_num,
427-
thread->stack_info.start,
428-
thread->stack_info.size,
429-
&K_MEM_PARTITION_P_RW_U_RW);
460+
ret2 = insert_region(dyn_regions,
461+
region_num,
462+
max_region_num,
463+
thread->stack_info.start,
464+
thread->stack_info.size,
465+
&K_MEM_PARTITION_P_RW_U_RW);
466+
CHECKIF(ret2 != 0) {
467+
ret = ret2;
468+
}
469+
470+
region_num = (uint8_t)ret2;
430471
}
431472

432473
arm_core_mpu_disable();
433-
flush_dynamic_regions_to_mpu(dyn_regions, region_num);
474+
ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
434475
arm_core_mpu_enable();
476+
477+
out:
478+
return ret;
435479
}
436480

437481
int arch_mem_domain_max_partitions_get(void)
@@ -445,42 +489,54 @@ int arch_mem_domain_max_partitions_get(void)
445489
return max_parts;
446490
}
447491

448-
void arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id)
492+
int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id)
449493
{
450494
ARG_UNUSED(domain);
451495
ARG_UNUSED(partition_id);
496+
497+
return 0;
452498
}
453499

454-
void arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id)
500+
int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id)
455501
{
456502
ARG_UNUSED(domain);
457503
ARG_UNUSED(partition_id);
504+
505+
return 0;
458506
}
459507

460-
void arch_mem_domain_thread_add(struct k_thread *thread)
508+
int arch_mem_domain_thread_add(struct k_thread *thread)
461509
{
510+
int ret = 0;
511+
462512
if (thread == _current) {
463-
configure_dynamic_mpu_regions(thread);
513+
ret = configure_dynamic_mpu_regions(thread);
464514
}
465515
#ifdef CONFIG_SMP
466516
else {
467517
/* the thread could be running on another CPU right now */
468518
z_arm64_mem_cfg_ipi();
469519
}
470520
#endif
521+
522+
return ret;
471523
}
472524

473-
void arch_mem_domain_thread_remove(struct k_thread *thread)
525+
int arch_mem_domain_thread_remove(struct k_thread *thread)
474526
{
527+
int ret = 0;
528+
475529
if (thread == _current) {
476-
configure_dynamic_mpu_regions(thread);
530+
ret = configure_dynamic_mpu_regions(thread);
477531
}
478532
#ifdef CONFIG_SMP
479533
else {
480534
/* the thread could be running on another CPU right now */
481535
z_arm64_mem_cfg_ipi();
482536
}
483537
#endif
538+
539+
return ret;
484540
}
485541

486542
void z_arm64_thread_mem_domains_init(struct k_thread *thread)

arch/arm64/core/mmu.c

Lines changed: 32 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1006,8 +1006,8 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
10061006
return 0;
10071007
}
10081008

1009-
static void private_map(struct arm_mmu_ptables *ptables, const char *name,
1010-
uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
1009+
static int private_map(struct arm_mmu_ptables *ptables, const char *name,
1010+
uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
10111011
{
10121012
int ret;
10131013

@@ -1018,10 +1018,12 @@ static void private_map(struct arm_mmu_ptables *ptables, const char *name,
10181018
if (is_ptable_active(ptables)) {
10191019
invalidate_tlb_all();
10201020
}
1021+
1022+
return ret;
10211023
}
10221024

1023-
static void reset_map(struct arm_mmu_ptables *ptables, const char *name,
1024-
uintptr_t addr, size_t size)
1025+
static int reset_map(struct arm_mmu_ptables *ptables, const char *name,
1026+
uintptr_t addr, size_t size)
10251027
{
10261028
int ret;
10271029

@@ -1030,40 +1032,44 @@ static void reset_map(struct arm_mmu_ptables *ptables, const char *name,
10301032
if (is_ptable_active(ptables)) {
10311033
invalidate_tlb_all();
10321034
}
1035+
1036+
return ret;
10331037
}
10341038

1035-
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
1036-
uint32_t partition_id)
1039+
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
1040+
uint32_t partition_id)
10371041
{
10381042
struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables;
10391043
struct k_mem_partition *ptn = &domain->partitions[partition_id];
10401044

1041-
private_map(domain_ptables, "partition", ptn->start, ptn->start,
1042-
ptn->size, ptn->attr.attrs | MT_NORMAL);
1045+
return private_map(domain_ptables, "partition", ptn->start, ptn->start,
1046+
ptn->size, ptn->attr.attrs | MT_NORMAL);
10431047
}
10441048

1045-
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
1046-
uint32_t partition_id)
1049+
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
1050+
uint32_t partition_id)
10471051
{
10481052
struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables;
10491053
struct k_mem_partition *ptn = &domain->partitions[partition_id];
10501054

1051-
reset_map(domain_ptables, "partition removal", ptn->start, ptn->size);
1055+
return reset_map(domain_ptables, "partition removal",
1056+
ptn->start, ptn->size);
10521057
}
10531058

1054-
static void map_thread_stack(struct k_thread *thread,
1055-
struct arm_mmu_ptables *ptables)
1059+
static int map_thread_stack(struct k_thread *thread,
1060+
struct arm_mmu_ptables *ptables)
10561061
{
1057-
private_map(ptables, "thread_stack", thread->stack_info.start,
1058-
thread->stack_info.start, thread->stack_info.size,
1059-
MT_P_RW_U_RW | MT_NORMAL);
1062+
return private_map(ptables, "thread_stack", thread->stack_info.start,
1063+
thread->stack_info.start, thread->stack_info.size,
1064+
MT_P_RW_U_RW | MT_NORMAL);
10601065
}
10611066

1062-
void arch_mem_domain_thread_add(struct k_thread *thread)
1067+
int arch_mem_domain_thread_add(struct k_thread *thread)
10631068
{
10641069
struct arm_mmu_ptables *old_ptables, *domain_ptables;
10651070
struct k_mem_domain *domain;
10661071
bool is_user, is_migration;
1072+
int ret = 0;
10671073

10681074
domain = thread->mem_domain_info.mem_domain;
10691075
domain_ptables = &domain->arch.ptables;
@@ -1073,7 +1079,7 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
10731079
is_migration = (old_ptables != NULL) && is_user;
10741080

10751081
if (is_migration) {
1076-
map_thread_stack(thread, domain_ptables);
1082+
ret = map_thread_stack(thread, domain_ptables);
10771083
}
10781084

10791085
thread->arch.ptables = domain_ptables;
@@ -1089,12 +1095,14 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
10891095
}
10901096

10911097
if (is_migration) {
1092-
reset_map(old_ptables, __func__, thread->stack_info.start,
1098+
ret = reset_map(old_ptables, __func__, thread->stack_info.start,
10931099
thread->stack_info.size);
10941100
}
1101+
1102+
return ret;
10951103
}
10961104

1097-
void arch_mem_domain_thread_remove(struct k_thread *thread)
1105+
int arch_mem_domain_thread_remove(struct k_thread *thread)
10981106
{
10991107
struct arm_mmu_ptables *domain_ptables;
11001108
struct k_mem_domain *domain;
@@ -1103,15 +1111,15 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
11031111
domain_ptables = &domain->arch.ptables;
11041112

11051113
if ((thread->base.user_options & K_USER) == 0) {
1106-
return;
1114+
return 0;
11071115
}
11081116

11091117
if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
1110-
return;
1118+
return 0;
11111119
}
11121120

1113-
reset_map(domain_ptables, __func__, thread->stack_info.start,
1114-
thread->stack_info.size);
1121+
return reset_map(domain_ptables, __func__, thread->stack_info.start,
1122+
thread->stack_info.size);
11151123
}
11161124

11171125
static void z_arm64_swap_ptables(struct k_thread *incoming)

0 commit comments

Comments
 (0)